# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py:percent
#     text_representation:
#       extension: .py
#       format_name: percent
#       format_version: '1.3'
#       jupytext_version: 1.15.2
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---
# %%
import pandas as pd
import seaborn as sns

# NOTE: the previous version called sns.set_style("whitegrid") and then
# sns.set(rc=...). sns.set() is an alias for set_theme(), whose default
# style is "darkgrid", so the second call silently reset the style and
# undid the whitegrid setting. Configure style and rc together instead.
sns.set_theme(style="whitegrid", rc={"figure.figsize": (10, 10)})
# %%
# Load the predicted catalog produced upstream of webdataset generation.
df = pd.read_csv("../data/predicted-catalog.csv")
# %%
df.head()
# %%
df.info(show_counts=True)
# %% [markdown]
# The `train_small` split duplicates entries from `train`, so drop it to analyze the full training set plus val.
# %%
df = df[df["split"] != "train_small"]
# %%
df.info(show_counts=True)
# %% [markdown]
# `predicted-catalog` doesn't have `train_small`; hence, it's a smaller file.
# %% [markdown]
# The original version had 10,436,521 entries; we expected a loss of about 84K from the genera with label "unknown".
# We have still lost about 300K images from the original 10,092,530, but the pre-webdataset generation gained about 30K back from v3.2 (adding subspecies back in?).
#
# .......
#
# After webdataset generation:
#
# There is still another ~300K missing for this 10,065,576. It seems subspecies were not integrated back in under their species for this version, as we now have 269 fewer images than last time.
#
# Coverage for species and genus has also dropped by 269 and a similar amount, respectively.
# %% [markdown]
# ### Focus here on the difference between `predicted-catalog` and `catalog`, which is only in EOL data.
# %%
df.nunique()
# %% [markdown]
# There are 504,018 unique EOL page IDs (total 6,277,374 entries), compared to the 503,589 in the webdataset (total 6,250,420 images).
# %%
# How many entries get dropped during webdataset generation.
print(f"There are {6277374 - 6250420} less entries in the webdataset.")
# %% [markdown]
# Notice that we have 12 unique kingdoms, which we're sticking with.
# %%
df["kingdom"].value_counts()
# %% [markdown]
# There is 1 more member of `Plantae` predicted, but 27K more `Animalia`.
# %%
# Columns 9-15 hold the seven taxonomic ranks (kingdom through species).
taxa = list(df.columns[9:16])
taxa
# %% [markdown]
# Check the number of images with all 7 taxonomic labels.
# %%
# Keep only rows where every one of the seven taxonomic ranks is present.
df_all_taxa = df.dropna(subset=taxa)
df_all_taxa[taxa].info(show_counts=True)
# %% [markdown]
# We have 8,482,197 entries with full taxonomic labels, compared to 8,455,243 in the webdataset, so
# ### _**all**_ of our lost entries have all taxonomic ranks filled.
# %% [markdown]
# Let's add a column indicating the original data source so we can also get some stats by datasource, specifically focusing on EOL now.
# %%
# Add a data_source column for easier slicing. The dict preserves insertion
# order, so later sources (EOL last) take precedence if a row were somehow
# flagged for more than one source — same precedence as before.
_source_flags = {
    "inat21_filename": "iNat21",
    "bioscan_filename": "BIOSCAN",
    "eol_content_id": "EOL",
}
for _flag_col, _source in _source_flags.items():
    df.loc[df[_flag_col].notna(), "data_source"] = _source
# %% [markdown]
# First, check their unique class values (`common`).
# %%
df.loc[df["data_source"] == "EOL", "common"].nunique()
# %% [markdown]
# EOL number of unique classes is 439,910 in the webdataset, so we do lose 386...
# %% [markdown]
# Make `df_taxa` with just taxa columns (+ `common` & `data_source`) so it's smaller to process faster.
# %%
# data_source first, then the seven taxa columns plus common (columns 9-16).
taxa_com = ["data_source"] + list(df.columns[9:17])
df_taxa = df[taxa_com]
# %%
df_taxa.head()
# %% [markdown]
# Let's look a little closer at EOL.
# %%
# One view per data source.
inat21_df = df_taxa[df_taxa["data_source"] == "iNat21"]
bioscan_df = df_taxa[df_taxa["data_source"] == "BIOSCAN"]
eol_df = df_taxa[df_taxa["data_source"] == "EOL"]
# %% [markdown]
# ### EOL
# %%
eol_df.info(show_counts=True)
# %%
eol_df.nunique()
# %% [markdown]
# It seems the webdataset loses 5 families, 39 genera, and 84 species. As noted above, 386 common labels are lost.
# %% [markdown]
# There are no missing species that get lost; as observed above, all lost entries have all taxonomic labels.
# %%
# Number of unique 7-tuples in EOL.
# eol_df is a slice of df_taxa (itself derived from df), so assigning a new
# column on it directly triggers pandas' SettingWithCopyWarning and the write
# may not stick. Work on an explicit copy before mutating.
eol_df = eol_df.copy()
eol_df['duplicate'] = eol_df.duplicated(subset = taxa, keep = 'first')
eol_df_unique_taxa = eol_df.loc[~eol_df['duplicate']]
# %%
eol_df_unique_taxa.info(show_counts = True)
# %% [markdown]
# 400 unique taxa are lost.
# %% [markdown]
# Let's check the `species` length in EOL as well; we know there are some that have genus-species, and others with hybrids that get VERY long.
#
# Wonder if this is where we lose some?
# A quick check in the catalog notebook shows there are still plenty with full taxa there.
# %%