Sasha committed · 4474a2c · Parent(s): 03a66e4
adding some hacky task type detection
app.py CHANGED
@@ -23,7 +23,9 @@ top_datasets= ['glue', 'super_glue', 'wikitext', 'imdb', 'squad', 'squad_es', \
                'sick', 'xsum', 'wikiann', 'yelp_polarity', 'hellaswag', 'piqa', \
                'race', 'winogrande']
 
-tasks= ['text
+tasks= ['text classification', 'question answering', 'automatic speech recognition', 'natural language inference', \
+        'machine translation', 'sentiment analysis', 'text simplification', 'named entity recognition', \
+        'reading comprehension']
 metrics= ['matthews_correlation', 'perplexity', 'meteor', 'code_eval', 'super_glue', 'rouge', 'mauve', 'cer', 'accuracy', 'recall', 'bleurt', 'sari', 'precision', 'mean_iou', 'squad', 'mahalanobis', 'chrf', 'mae', 'squad_v2', 'seqeval', 'cuad', 'wiki_split', 'google_bleu', 'competition_math', 'pearsonr', 'xtreme_s', 'comet', 'gleu', 'spearmanr', 'f1', 'frugalscore', 'bertscore', 'indic_glue', 'mse', 'xnli', 'ter', 'coval', 'wer', 'bleu', 'glue', 'sacrebleu']
 
 with st.sidebar.expander("Datasets", expanded=True):
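The new `tasks` list feeds the keyword fallback added further down in this diff, where each entry is tested as a plain substring of the dataset description. A minimal sketch of where that description text comes from, assuming the `datasets` library (`imdb` is only an example dataset name):

from datasets import load_dataset_builder

# Fetches dataset metadata only, not the data itself; 'imdb' is an arbitrary example.
builder = load_dataset_builder('imdb')
description = str(builder.info.description).lower()

# The commit's heuristic is a plain substring test per keyword.
print([t for t in ['text classification', 'sentiment analysis'] if t in description])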
@@ -50,11 +52,12 @@ st.markdown("### Description")
 st.markdown(dataset_builder.info.description)
 st.markdown("For more information about this dataset, check out [its website](https://huggingface.co/datasets/"+dataset_name+")")
 
+
 st.markdown("### Dataset-Specific Metrics")
 if dataset_name in metrics:
     st.markdown("Great news! Your dataset has a dedicated metric for it! You can use it like this:")
     code = ''' from datasets import load_metric
-metric = load_metric('''+
+metric = load_metric('''+dataset_name+''', '''+dataset_config+''')'''
     st.code(code, language='python')
     dedicated_metric = True
 else:
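Note that the concatenation above interpolates the values without quotes: for dataset_name = 'glue' and dataset_config = 'cola', the rendered snippet reads metric = load_metric(glue, cola). A sketch of the same snippet builder with quoting fixed via an f-string ('glue' and 'cola' are example values, not from the commit):

dataset_name, dataset_config = 'glue', 'cola'  # example values
code = f'''from datasets import load_metric
metric = load_metric("{dataset_name}", "{dataset_config}")'''
print(code)
# from datasets import load_metric
# metric = load_metric("glue", "cola")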
@@ -65,14 +68,20 @@ st.markdown("### Task-Specific Metrics")
 
 try:
     task = dataset_builder.info.task_templates[0].task
-
+except:
+    for t in tasks:
+        if t in str(dataset_builder.info.description).lower():
+            task = t
+        else:
+            task = None
+
+if task is not None:
+    st.markdown("The task associated to it your dataset is: " + task.replace('-',' '))
     if task == 'automatic-speech-recognition':
         st.markdown('Automatic Speech Recognition has some dedicated metrics such as:')
         st.markdown('[Word Error Rate](https://huggingface.co/metrics/wer)')
         st.markdown('[Character Error Rate](https://huggingface.co/metrics/cer)')
-
-    st.markdown("The task for your dataset doesn't have any dedicated metrics, but you can still use general ones!")
-except:
+    else:
         st.markdown("The task for your dataset doesn't have any dedicated metrics, but you can still use general ones!")
 
 
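One caveat with the fallback loop: the `else` binds to the `if` inside the `for`, so `task` is reassigned on every iteration and the result depends only on the last keyword in `tasks`, discarding earlier matches. A sketch that keeps the first match instead (same substring heuristic; the bare `except:` is narrowed as an assumption about the likely failure modes):

try:
    task = dataset_builder.info.task_templates[0].task
except (AttributeError, IndexError, TypeError):
    # task_templates may be missing, None, or empty
    description = str(dataset_builder.info.description).lower()
    # next() yields the first matching keyword, or None when nothing matches
    task = next((t for t in tasks if t in description), None)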
@@ -106,14 +115,14 @@ try:
     #proportion = [0.85, 0.15]
     stdev_dataset= statistics.stdev(proportion)
     if stdev_dataset <= balanced_stdev:
-        st.markdown("Since your dataset is well-balanced, you can look at using:")
+        st.markdown("Since your dataset is well-balanced (with a standard deviation of " + str(round(stdev_dataset,2)) +"), you can look at using:")
         st.markdown('[Accuracy](https://huggingface.co/metrics/accuracy)')
         accuracy_code = '''from datasets import load_metric
 metric = load_metric("accuracy")'''
         st.code(accuracy_code, language='python')
 
     else:
-        st.markdown("Since your dataset is not well-balanced, you can look at using:")
+        st.markdown("Since your dataset is not well-balanced (with a standard deviation of " + str(round(stdev_dataset,2)) +"), you can look at using:")
         st.markdown('[F1 Score](https://huggingface.co/metrics/f1)')
         accuracy_code = '''from datasets import load_metric
 metric = load_metric("accuracy")'''
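Also worth noting: both branches build the same `accuracy_code` string, so the F1 branch still renders load_metric("accuracy"); presumably load_metric("f1") was intended. The balance check itself is just the sample standard deviation of the label proportions, for example:

import statistics

# Worked examples of the balance heuristic; balanced_stdev is the app's
# threshold, and 0.2 here is only an illustrative stand-in.
balanced_stdev = 0.2
print(statistics.stdev([0.5, 0.5]))    # 0.0   -> "well-balanced" branch
print(statistics.stdev([0.85, 0.15]))  # ~0.49 -> "not well-balanced" branch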