tensorized committed on
Commit · 50e5fc3
1 Parent(s): ff41c93
testing scitail

Files changed:
- app.py +282 -0
- ngram.py +74 -0
- streamlit.py +23 -0
- vis_data_card.py +406 -0
app.py ADDED
@@ -0,0 +1,282 @@
from collections import Counter

import numpy as np
import pandas as pd
import plotly.express as px
import streamlit as st
from datasets import load_dataset
from matplotlib import pyplot as plt
from matplotlib_venn import venn2, venn3
from ngram import get_tuples_manual_sentences
from rich import print as rprint

from bigbio.dataloader import BigBioConfigHelpers

# from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud


# vanilla tokenizer
def tokenizer(text, counter):
    if not text:
        return text, []
    text = text.strip()
    text = text.replace("\t", "")
    text = text.replace("\n", "")
    # split on single spaces
    text_list = text.split(" ")
    return text, text_list


def norm(lengths):
    mu = np.mean(lengths)
    sigma = np.std(lengths)
    return mu, sigma


def load_helper():
    conhelps = BigBioConfigHelpers()
    conhelps = conhelps.filtered(lambda x: x.dataset_name != "pubtator_central")
    conhelps = conhelps.filtered(lambda x: x.is_bigbio_schema)
    conhelps = conhelps.filtered(lambda x: not x.is_local)
    rprint(
        "loaded {} configs from {} datasets".format(
            len(conhelps),
            len(set([helper.dataset_name for helper in conhelps])),
        )
    )
    return conhelps


# text fields to measure, per bigbio schema
_TEXT_MAPS = {
    "bigbio_kb": ["text"],
    "bigbio_text": ["text"],
    "bigbio_qa": ["question", "context"],
    "bigbio_te": ["premise", "hypothesis"],
    "bigbio_tp": ["text_1", "text_2"],
    "bigbio_pairs": ["text_1", "text_2"],
    "bigbio_t2t": ["text_1", "text_2"],
}

IBM_COLORS = [
    "#648fff",
    "#dc267f",
    "#ffb000",
    "#fe6100",
    "#785ef0",
    "#000000",
    "#ffffff",
]

# n-gram order used throughout
N = 3


def token_length_per_entry(entry, schema, counter):
    result = {}
    if schema == "bigbio_kb":
        for passage in entry["passages"]:
            result_key = passage["type"]
            for key in _TEXT_MAPS[schema]:
                text = passage[key][0]
                sents, ngrams = get_tuples_manual_sentences(text.lower(), N)
                toks = [tok for sent in sents for tok in sent]
                tups = ["_".join(tup) for tup in ngrams]
                counter.update(tups)
                result[result_key] = len(toks)
    else:
        for key in _TEXT_MAPS[schema]:
            text = entry[key]
            sents, ngrams = get_tuples_manual_sentences(text.lower(), N)
            toks = [tok for sent in sents for tok in sent]
            result[key] = len(toks)
            tups = ["_".join(tup) for tup in ngrams]
            counter.update(tups)
    return result, counter


def parse_token_length_and_n_gram(dataset, data_config, st=None):
    # `st` is the Streamlit container to render progress into (e.g. st.sidebar)
    hist_data = []
    n_gram_counters = []
    rprint(data_config)
    for split, data in dataset.items():
        my_bar = st.progress(0)
        total = len(data)
        n_gram_counter = Counter()
        for i, entry in enumerate(data):
            my_bar.progress(int(i / total * 100))
            result, n_gram_counter = token_length_per_entry(
                entry, data_config.schema, n_gram_counter
            )
            result["total_token_length"] = sum([v for k, v in result.items()])
            result["split"] = split
            hist_data.append(result)
        # remove single count
        # n_gram_counter = Counter({x: count for x, count in n_gram_counter.items() if count > 1})
        n_gram_counters.append(n_gram_counter)
        my_bar.empty()
    st.write("token lengths complete!")

    return pd.DataFrame(hist_data), n_gram_counters


def center_title(fig):
    fig.update_layout(
        title={"y": 0.9, "x": 0.5, "xanchor": "center", "yanchor": "top"},
        font=dict(
            size=18,
        ),
    )
    return fig


def draw_histogram(hist_data, col_name, st=None):
    fig = px.histogram(
        hist_data,
        x=col_name,
        color="split",
        color_discrete_sequence=IBM_COLORS,
        marginal="box",  # or violin, rug
        barmode="group",
        hover_data=hist_data.columns,
        histnorm="probability",
        nbins=20,
        title=f"{col_name} distribution by split",
    )

    st.plotly_chart(center_title(fig), use_container_width=True)


def draw_bar(bar_data, x, y, st=None):
    fig = px.bar(
        bar_data,
        x=x,
        y=y,
        color="split",
        color_discrete_sequence=IBM_COLORS,
        # marginal="box",  # or violin, rug
        barmode="group",
        hover_data=bar_data.columns,
        title=f"{y} distribution by split",
    )
    st.plotly_chart(center_title(fig), use_container_width=True)


def parse_metrics(metadata, st=None):
    for k, m in metadata.items():
        mattrs = m.__dict__
        # inner variable renamed from `m` to avoid shadowing the outer loop variable
        for attr_name, attr in mattrs.items():
            if type(attr) == int and attr > 0:
                st.metric(label=f"{k}-{attr_name}", value=attr)


def parse_counters(metadata):
    metadata = metadata["train"]  # using the training counter to fetch the names
    counters = []
    for k, v in metadata.__dict__.items():
        if "counter" in k and len(v) > 0:
            counters.append(k)
    return counters


# generate the df for histogram
def parse_label_counter(metadata, counter_type):
    hist_data = []
    for split, m in metadata.items():
        metadata_counter = getattr(m, counter_type)
        for k, v in metadata_counter.items():
            row = {}
            row["labels"] = k
            row[counter_type] = v
            row["split"] = split
            hist_data.append(row)
    return pd.DataFrame(hist_data)


if __name__ == "__main__":
    # load helpers
    conhelps = load_helper()
    configs_set = set()

    for conhelper in conhelps:
        configs_set.add(conhelper.dataset_name)
    # st.write(sorted(configs_set))

    # setup page, sidebar, columns
    st.set_page_config(layout="wide")
    s = st.session_state
    if not s:
        s.pressed_first_button = False
    data_name = st.sidebar.selectbox("dataset", sorted(configs_set))
    st.sidebar.write("you selected:", data_name)
    st.header(f"Dataset stats for {data_name}")

    # setup data configs
    data_helpers = conhelps.for_dataset(data_name)
    data_configs = [d.config for d in data_helpers]
    data_config_names = [d.config.name for d in data_helpers]
    data_config_name = st.sidebar.selectbox("config", set(data_config_names))

    if st.sidebar.button("fetch") or s.pressed_first_button:
        s.pressed_first_button = True
        helper = conhelps.for_config_name(data_config_name)
        metadata_helper = helper.get_metadata()

        parse_metrics(metadata_helper, st.sidebar)

        # load HF dataset
        data_idx = data_config_names.index(data_config_name)
        data_config = data_configs[data_idx]
        # st.write(data_name)

        dataset = load_dataset(f"bigbio/{data_name}", name=data_config_name)
        ds = pd.DataFrame(dataset["train"])
        st.write(ds)
        # general token length
        tok_hist_data, ngram_counters = parse_token_length_and_n_gram(
            dataset, data_config, st.sidebar
        )
        # draw token distribution
        draw_histogram(tok_hist_data, "total_token_length", st)
        # general counter(s)
        col1, col2 = st.columns([1, 6])
        counters = parse_counters(metadata_helper)
        counter_type = col1.selectbox("counter_type", counters)
        label_df = parse_label_counter(metadata_helper, counter_type)
        label_max = int(label_df[counter_type].max() - 1)
        label_min = int(label_df[counter_type].min())
        filter_value = col1.slider("counter_filter (min, max)", label_min, label_max)
        label_df = label_df[label_df[counter_type] >= filter_value]
        # draw bar chart for counter
        draw_bar(label_df, "labels", counter_type, col2)
        venn_fig, ax = plt.subplots()
        if len(ngram_counters) == 2:
            union_counter = ngram_counters[0] + ngram_counters[1]
            print(ngram_counters[0].most_common(10))
            print(ngram_counters[1].most_common(10))
            total = len(union_counter.keys())
            ngram_counter_sets = [
                set(ngram_counter.keys()) for ngram_counter in ngram_counters
            ]
            venn2(
                ngram_counter_sets,
                dataset.keys(),
                set_colors=IBM_COLORS[:3],
                subset_label_formatter=lambda x: f"{(x/total):1.0%}",
            )
        else:
            # this branch assumes exactly three splits (train/validation/test)
            union_counter = ngram_counters[0] + ngram_counters[1] + ngram_counters[2]
            total = len(union_counter.keys())
            ngram_counter_sets = [
                set(ngram_counter.keys()) for ngram_counter in ngram_counters
            ]
            venn3(
                ngram_counter_sets,
                dataset.keys(),
                set_colors=IBM_COLORS[:4],
                subset_label_formatter=lambda x: f"{(x/total):1.0%}",
            )
        venn_fig.suptitle(f"{N}-gram intersection for {data_name}", fontsize=20)
        st.pyplot(venn_fig)

    st.sidebar.button("Re-run")
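A minimal sketch of how the per-entry measurement behaves for a pairs-style schema — illustrative only, not part of the commit, and it assumes app.py imports cleanly (the ngram import loads spaCy's en_core_web_sm at import time):

# sketch: exercise token_length_per_entry on a hand-built bigbio_te-style entry,
# with no Streamlit session and no HF download
from collections import Counter
from app import token_length_per_entry

entry = {
    "premise": "Cells divide by mitosis.",
    "hypothesis": "Mitosis produces two daughter cells.",
}
result, counter = token_length_per_entry(entry, "bigbio_te", Counter())
print(result)                  # per-field token counts (after stop-word removal)
print(counter.most_common(3))  # most frequent 3-grams accumulated so far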
ngram.py ADDED
@@ -0,0 +1,74 @@
# partially from https://gist.github.com/gaulinmp/da5825de975ed0ea6a24186434c24fe4
from nltk.util import ngrams
from nltk.corpus import stopwords
import spacy
import pandas as pd
import re
from itertools import chain
from collections import Counter
from datasets import load_dataset


nlp = spacy.load("en_core_web_sm")
nlp.add_pipe("sentencizer")
STOPWORDS = nlp.Defaults.stop_words

N = 5
re_sent_ends_naive = re.compile(r'[.\n]')
# raw string fixes the invalid escape sequence in the original '[^a-zA-Z\.\n]'
re_stripper_naive = re.compile(r'[^a-zA-Z.\n]')

splitter_naive = lambda x: re_sent_ends_naive.split(re_stripper_naive.sub(' ', x))


# list of tokens for one sentence
def remove_stop_words(text):
    result = []
    for w in text:
        if w not in STOPWORDS:
            result.append(w)
    return result


# get sentences from a multi-sentence text
def parse_sentences(text, nlp):
    doc = nlp(text)
    sentences = (remove_stop_words(sent) for sent in doc.sents)
    return sentences


def get_tuples_manual_sentences(txt, N):
    """Naive get tuples that uses periods or newlines to denote sentences."""
    if not txt:
        return None, []
    sentences = (x.split() for x in splitter_naive(txt) if x)
    sentences = list(map(remove_stop_words, list(sentences)))
    # sentences = (remove_stop_words(nlp(x)) for x in splitter_naive(txt) if x)
    # sentences = parse_sentences(txt, nlp)
    # print(list(sentences))
    ng = (ngrams(x, N) for x in sentences if len(x) >= N)
    return sentences, list(chain(*ng))


def count_by_split(split_data):
    c = Counter()
    for entry in split_data:
        text = entry['text']
        sents, tup = get_tuples_manual_sentences(text, N)
        tup = ["_".join(ta) for ta in tup]
        c.update(tup)
    return c


# data = load_dataset("bigbio/biodatasets/chemdner/chemdner.py", name="chemdner_bigbio_text")
# counters = []
# for split, split_data in data.items():
#     split_counter = count_by_split(split_data)
#     counters.append(split_counter)

# ab_intersect = counters[0] & counters[1]
# diff = {x: count for x, count in counters[0].items() if x not in ab_intersect.keys() and count > 2}
# if len(counters) > 2:
#     bc_intersect = counters[1] & counters[2]
# print(ab_intersect.most_common(10))
# print(Counter(diff).most_common(10))
# data.cleanup_cache_files()
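A quick illustrative check of the naive splitter (my sketch, assuming en_core_web_sm is installed, since importing ngram loads it): periods delimit "sentences", stop words are dropped, and n-grams are formed per sentence.

from ngram import get_tuples_manual_sentences

sents, grams = get_tuples_manual_sentences(
    "the cat sat on the mat. dogs chase cats in the yard", 3
)
print(sents)  # stop-word-filtered token lists, one per naive sentence
print(grams)  # 3-gram tuples, e.g. ('dogs', 'chase', 'cats')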
streamlit.py ADDED
@@ -0,0 +1,23 @@
import streamlit as st
import pandas as pd
import numpy as np
from bigbio.dataloader import BigBioConfigHelpers


from datasets import load_dataset

dataset = load_dataset("bigbio/scitail", name="scitail_bigbio_te")

ds = pd.DataFrame(dataset["train"])
st.write(ds)


conhelps = BigBioConfigHelpers()
conhelps = conhelps.filtered(lambda x: x.dataset_name != "pubtator_central")
conhelps = conhelps.filtered(lambda x: x.is_bigbio_schema)
conhelps = conhelps.filtered(lambda x: not x.is_local)
st.write(
    "loaded {} configs from {} datasets".format(
        len(conhelps),
        len(set([helper.dataset_name for helper in conhelps])),
    )
)
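For context, the same filtered-helpers API already used above could narrow the listing to the scitail configs this smoke test loads — a sketch on my part, using only calls the commit itself exercises:

scitail_helpers = conhelps.filtered(lambda x: x.dataset_name == "scitail")
st.write(sorted(h.config.name for h in scitail_helpers))  # should include scitail_bigbio_te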
vis_data_card.py ADDED
@@ -0,0 +1,406 @@
# from matplotlib_venn import venn2, venn3
import json

import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
from datasets import load_dataset
from plotly.subplots import make_subplots
from rich import print as rprint

from collections import Counter

from ngram import get_tuples_manual_sentences

from bigbio.dataloader import BigBioConfigHelpers
import sys

pio.kaleido.scope.mathjax = None


# vanilla tokenizer
def tokenizer(text, counter):
    if not text:
        return text, []
    text = text.strip()
    text = text.replace("\t", "")
    text = text.replace("\n", "")
    # split on single spaces
    text_list = text.split(" ")
    return text, text_list


def norm(lengths):
    mu = np.mean(lengths)
    sigma = np.std(lengths)
    return mu, sigma


def load_helper(local=""):
    if local != "":
        with open(local, "r") as file:
            conhelps = json.load(file)
    else:
        conhelps = BigBioConfigHelpers()
        conhelps = conhelps.filtered(lambda x: x.dataset_name != "pubtator_central")
        conhelps = conhelps.filtered(lambda x: x.is_bigbio_schema)
        conhelps = conhelps.filtered(lambda x: not x.is_local)
        rprint(
            "loaded {} configs from {} datasets".format(
                len(conhelps),
                len(set([helper.dataset_name for helper in conhelps])),
            )
        )
    return conhelps


_TEXT_MAPS = {
    "bigbio_kb": ["text"],
    "bigbio_text": ["text"],
    "bigbio_qa": ["question", "context"],
    "bigbio_te": ["premise", "hypothesis"],
    "bigbio_tp": ["text_1", "text_2"],
    "bigbio_pairs": ["text_1", "text_2"],
    "bigbio_t2t": ["text_1", "text_2"],
}

IBM_COLORS = [
    "#648fff",  # train
    "#dc267f",  # val
    "#ffb000",  # test
    "#fe6100",
    "#785ef0",
    "#000000",
    "#ffffff",
]

SPLIT_COLOR_MAP = {
    "train": "#648fff",
    "validation": "#dc267f",
    "test": "#ffb000",
}

N = 3


def token_length_per_entry(entry, schema, counter):
    result = {}
    entry_id = entry['id']
    if schema == "bigbio_kb":
        for passage in entry["passages"]:
            result_key = passage["type"]
            for key in _TEXT_MAPS[schema]:
                text = passage[key][0]
                if not text:
                    print(f"WARNING: text key does not exist: entry {entry_id}")
                    result["token_length"] = 0
                    result["text_type"] = result_key
                    continue
                sents, ngrams = get_tuples_manual_sentences(text.lower(), N)
                toks = [tok for sent in sents for tok in sent]
                tups = ["_".join(tup) for tup in ngrams]
                counter.update(tups)
                result["token_length"] = len(toks)
                result["text_type"] = result_key
    else:
        for key in _TEXT_MAPS[schema]:
            text = entry[key]
            if not text:
                print(f"WARNING: text key does not exist, entry {entry_id}")
                result["token_length"] = 0
                result["text_type"] = key
                continue
            else:
                sents, ngrams = get_tuples_manual_sentences(text.lower(), N)
                toks = [tok for sent in sents for tok in sent]
                result["token_length"] = len(toks)
                result["text_type"] = key
                tups = ["_".join(tup) for tup in ngrams]
                counter.update(tups)
    return result, counter


def parse_token_length_and_n_gram(dataset, schema_type):
    hist_data = []
    n_gram_counters = []
    for split, data in dataset.items():
        n_gram_counter = Counter()
        for i, entry in enumerate(data):
            result, n_gram_counter = token_length_per_entry(
                entry, schema_type, n_gram_counter
            )
            result["split"] = split
            hist_data.append(result)
        n_gram_counters.append(n_gram_counter)

    return pd.DataFrame(hist_data), n_gram_counters


def resolve_splits(df_split):
    official_splits = set(df_split).intersection(set(SPLIT_COLOR_MAP.keys()))
    return official_splits


def draw_box(df, col_name, row, col, fig):
    splits = resolve_splits(df["split"].unique())
    for split in splits:
        split_count = df.loc[df["split"] == split, col_name].tolist()
        print(split)
        fig.add_trace(
            go.Box(
                x=split_count,
                name=split,
                marker_color=SPLIT_COLOR_MAP[split.split("_")[0]],
            ),
            row=row,
            col=col,
        )


def draw_bar(df, col_name, y_name, row, col, fig):
    splits = resolve_splits(df["split"].unique())
    for split in splits:
        split_count = df.loc[df["split"] == split, col_name].tolist()
        y_list = df.loc[df["split"] == split, y_name].tolist()
        fig.add_trace(
            go.Bar(
                x=split_count,
                y=y_list,
                name=split,
                marker_color=SPLIT_COLOR_MAP[split.split("_")[0]],
                showlegend=False,
            ),
            row=row,
            col=col,
        )
    fig.update_traces(orientation="h")  # horizontal box plots


def parse_counters(metadata):
    metadata = metadata[
        list(metadata.keys())[0]
    ]  # using the training counter to fetch the names
    counters = []
    for k, v in metadata.__dict__.items():
        if "counter" in k and len(v) > 0:
            counters.append(k)
    return counters


# generate the df for histogram
def parse_label_counter(metadata, counter_type):
    hist_data = []
    for split, m in metadata.items():
        metadata_counter = getattr(m, counter_type)
        for k, v in metadata_counter.items():
            row = {}
            row["labels"] = k
            row[counter_type] = v
            row["split"] = split
            hist_data.append(row)
    return pd.DataFrame(hist_data)


def gen_latex(dataset_name, helper, splits, schemas, fig_path):
    if type(helper.description) is dict:
        # TODO hacky, change this to include all descriptions
        descriptions = helper.description[list(helper.description.keys())[0]]
    else:
        descriptions = helper.description
    descriptions = descriptions.replace("\n", "").replace("\t", "")
    langs = [l.value for l in helper.languages]
    languages = " ".join(langs)
    if type(helper.license) is dict:
        license = helper.license.value.name
    else:
        license = helper.license.name
    tasks = [" ".join(t.name.lower().split("_")) for t in helper.tasks]
    tasks = ", ".join(tasks)
    schemas = " ".join([r"{\tt "] + list(schemas) + ["}"])  # TODO \tt
    splits = ", ".join(list(splits))
    # use the parameter here; the original read the module-level data_name global
    data_name_display = " ".join(dataset_name.split("_"))
    latex_bod = r"\clearpage" + "\n" + r"\section*{" + fr"{data_name_display}" + " Data Card" + r"}" + "\n"
    latex_bod += (
        r"\begin{figure}[ht!]"
        + "\n"
        + r"\centering"
        + "\n"
        + r"\includegraphics[width=\linewidth]{"
    )
    latex_bod += f"{fig_path}" + r"}" + "\n"
    latex_bod += r"\caption{\label{fig:"
    latex_bod += fr"{dataset_name}" + r"}"
    latex_bod += (
        r"Token frequency distribution by split (top) and frequency of different kind of instances (bottom).}"
        + "\n"
    )
    latex_bod += r"\end{figure}" + "\n" + r"\textbf{Dataset Description} "
    latex_bod += (
        fr"{descriptions}"
        + "\n"
        + r"\textbf{Homepage:} "
        + f"{helper.homepage}"
        + "\n"
        + r"\textbf{URL:} "
        + f"{helper.homepage}"  # TODO change this later
        + "\n"
        + r"\textbf{Licensing:} "
        + f"{license}"
        + "\n"
        + r"\textbf{Languages:} "
        + f"{languages}"
        + "\n"
        + r"\textbf{Tasks:} "
        + f"{tasks}"
        + "\n"
        + r"\textbf{Schemas:} "
        + f"{schemas}"
        + "\n"
        + r"\textbf{Splits:} "
        + f"{splits}"
    )
    return latex_bod


def write_latex(latex_body, latex_name):
    # context manager ensures the file is closed even on error
    with open(f"tex/{latex_name}", "w") as text_file:
        text_file.write(latex_body)


def draw_figure(data_name, data_config_name, schema_type):
    helper = conhelps.for_config_name(data_config_name)
    metadata_helper = helper.get_metadata()  # calls load_dataset for meta parsing
    rprint(metadata_helper)
    splits = metadata_helper.keys()
    # calls HF load_dataset _again_ for token parsing
    dataset = load_dataset(
        f"bigbio/biodatasets/{data_name}/{data_name}.py", name=data_config_name
    )
    # general token length
    tok_hist_data, ngram_counters = parse_token_length_and_n_gram(dataset, schema_type)
    rprint(helper)

    # general counter(s)
    # TODO generate the pdf and fix latex

    counters = parse_counters(metadata_helper)
    print(counters)
    rows = len(counters) // 3
    # assumes at least one counter; otherwise specs/cols are never set
    if len(counters) >= 3:
        # counters = counters[:3]
        cols = 3
        specs = [[{"colspan": 3}, None, None]] + [[{}, {}, {}]] * (rows + 1)
    elif len(counters) == 1:
        specs = [[{}], [{}]]
        cols = 1
    elif len(counters) == 2:
        specs = [[{"colspan": 2}, None]] + [[{}, {}]] * (rows + 1)
        cols = 2
    counters.sort()

    counter_titles = ["Label Counts by Type: " + ct.split("_")[0] for ct in counters]
    titles = ("token length",) + tuple(counter_titles)
    # Make figure with subplots
    fig = make_subplots(
        rows=rows + 2,
        cols=cols,
        subplot_titles=titles,
        specs=specs,
        vertical_spacing=0.10,
        horizontal_spacing=0.10,
    )
    # draw token distribution
    if "token_length" in tok_hist_data.keys():
        draw_box(tok_hist_data, "token_length", row=1, col=1, fig=fig)
    for i, ct in enumerate(counters):
        row = i // 3 + 2
        col = i % 3 + 1
        label_df = parse_label_counter(metadata_helper, ct)
        label_min = int(label_df[ct].min())
        # filter_value = int((label_max - label_min) * 0.01 + label_min)
        label_df = label_df[label_df[ct] >= label_min]
        print(label_df.head(5))

        # draw bar chart for counter
        draw_bar(label_df, ct, "labels", row=row, col=col, fig=fig)

    fig.update_annotations(font_size=12)
    fig.update_layout(
        margin=dict(l=25, r=25, t=25, b=25, pad=2),
        # showlegend=False,
        # title_text=data_name,
        height=600,
        width=1000,
    )

    # fig.show()
    fig_name = f"{data_name}_{data_config_name}.pdf"

    fig_path = f"figures/data_card/{fig_name}"
    fig.write_image(fig_path)
    dataset.cleanup_cache_files()

    return helper, splits, fig_path


if __name__ == "__main__":
    # load helpers
    # each entry in local metadata is the dataset name
    dc_local = load_helper(local="scripts/bigbio-public-metadatas-6-8.json")
    # each entry is the config
    conhelps = load_helper()
    dc = list()
    # TODO uncomment this
    # for conhelper in conhelps:
    #     # print(f"{conhelper.dataset_name}-{conhelper.config.subset_id}-{conhelper.config.schema}")
    #     dc.append(conhelper.dataset_name)

    # datacard per data, metadata chart per config
    # for data_name, meta in dc_local.items():
    #     config_metas = meta['config_metas']
    #     config_metas_keys = config_metas.keys()
    #     if len(config_metas_keys) > 1:
    #         print(f'dataset {data_name} has more than one config')
    #     schemas = set()
    #     for config_name, config in config_metas.items():
    #         bigbio_schema = config['bigbio_schema']
    #         helper, splits, fig_path = draw_figure(data_name, config_name, bigbio_schema)
    #         schemas.add(helper.bigbio_schema_caps)
    #         latex_bod = gen_latex(data_name, helper, splits, schemas, fig_path)
    #         latex_name = f"{data_name}_{config_name}.tex"
    #         write_latex(latex_bod, latex_name)
    #         print(latex_bod)

    # TODO try this code first, then use this for the whole loop
    # skipped medal, too large, no nagel/pcr/pubtator_central/spl_adr_200db in local
    data_name = sys.argv[1]
    schemas = set()
    # LOCAL
    # meta = dc_local[data_name]
    # config_metas = meta['config_metas']
    # config_metas_keys = config_metas.keys()
    # if len(config_metas_keys) >= 1:
    #     print(f'dataset {data_name} has more than one config')
    # for config_name, config in config_metas.items():
    #     bigbio_schema = config['bigbio_schema']
    #     helper, splits, fig_path = draw_figure(data_name, config_name, bigbio_schema)
    #     schemas.add(helper.bigbio_schema_caps)
    #     latex_bod = gen_latex(data_name, helper, splits, schemas, fig_path)
    #     latex_name = f"{data_name}_{config_name}.tex"
    #     write_latex(latex_bod, latex_name)
    #     print(latex_bod)
    # NON LOCAL
    config_helpers = conhelps.for_dataset(data_name)
    for config_helper in config_helpers:
        rprint(config_helper)
        bigbio_schema = config_helper.config.schema
        config_name = config_helper.config.name
        helper, splits, fig_path = draw_figure(data_name, config_name, bigbio_schema)
        schemas.add(helper.bigbio_schema_caps)
        latex_bod = gen_latex(data_name, helper, splits, schemas, fig_path)
        latex_name = f"{data_name}_{config_name}.tex"
        write_latex(latex_bod, latex_name)
        print(latex_bod)
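An illustrative sketch of the shape parse_label_counter produces, since the bar traces above depend on it. This is my addition, not part of the commit; the label_counter attribute name is hypothetical (standing in for whatever "*_counter" attributes the metadata actually carries), and it assumes vis_data_card imports cleanly (its top level loads spaCy and configures kaleido):

from types import SimpleNamespace

from vis_data_card import parse_label_counter

meta = {
    "train": SimpleNamespace(label_counter={"entails": 120, "neutral": 80}),
    "test": SimpleNamespace(label_counter={"entails": 30, "neutral": 20}),
}
# long-form rows: one (labels, label_counter, split) triple per counter entry,
# ready to be grouped by split in the grouped bar traces
print(parse_label_counter(meta, "label_counter"))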