| # --- | |
| # jupyter: | |
| # jupytext: | |
| # formats: ipynb,py:percent | |
| # text_representation: | |
| # extension: .py | |
| # format_name: percent | |
| # format_version: '1.3' | |
| # jupytext_version: 1.16.0 | |
| # kernelspec: | |
| # display_name: data-dev | |
| # language: python | |
| # name: python3 | |
| # --- | |
# %%
import pandas as pd
import seaborn as sns
# Apply seaborn's "whitegrid" theme to every plot produced below.
sns.set_style("whitegrid")
| # %% [markdown] | |
| # Load in LILA CSV from [this commit](https://huggingface.co/datasets/imageomics/lila-bc-camera/blob/37b93ddf25c63bc30d8488ef78c1a53b9c4a3115/data/potential-test-sets/lila_image_urls_and_labels.csv). (this will take a while) | |
| # | |
| # sha256:3fdf87ceea75f8720208a95350c3c70831a6c1c745a92bb68c7f2c3239e4c455 | |
| # size 15931383983 | |
# %%
# NOTE(review): this CSV is ~16 GB (sha256/size in the markdown above);
# low_memory=False reads each column in one pass for consistent dtype
# inference at the cost of peak memory.
df = pd.read_csv("../data/potential-test-sets/lila_image_urls_and_labels.csv", low_memory = False)
df.head()
# %%
df.columns
# %%
# Distribution of annotation granularity (image vs. sequence vs. unknown).
df.annotation_level.value_counts()
| # %% [markdown] | |
| # Annotation level indicates image vs sequence (or unknown), we specifically want those annotated at the image-level, since they should be "clean" images. Though we will want to label them with how many distinct species are in the image first. | |
| # | |
| # We have 3,533,538 images labeled to the image-level. | |
| # | |
| # ### Check Dataset Counts | |
| # | |
| # 1. Make sure we have all datasets expected. We're specifically interested in: | |
| # - [Snapshot Safari 2024 Expansion](https://lila.science/datasets/snapshot-safari-2024-expansion/) | |
| # - [Ohio Small Animals](https://lila.science/datasets/ohio-small-animals/) | |
| # - [Desert Lion Conservation Camera Traps](https://lila.science/datasets/desert-lion-conservation-camera-traps/) | |
| # - [Orinoquia Camera Traps](https://lila.science/datasets/orinoquia-camera-traps/) | |
| # - [SWG Camera Traps 2018-2020](https://lila.science/datasets/swg-camera-traps) | |
| # - [Island Conservation Camera Traps](https://lila.science/datasets/island-conservation-camera-traps/) | |
| # - [NOAA Puget Sound Nearshore Fish 2017-2018](https://lila.science/datasets/noaa-puget-sound-nearshore-fish) could be interesting for the combined categories, though it is _very_ general (has only three labels: `fish`, `crab`, `fish_and_crab`). | |
| # 2. Check which/how many datasets are labeled to the image level (and check for match to [Andrey's spreadsheet](https://docs.google.com/spreadsheets/d/1sC90DolAvswDUJ1lNSf0sk_norR24LwzX2O4g9OxMZE/edit?usp=drive_link)). | |
| # %% | |
| df.dataset_name.value_counts() | |
| # %% | |
| df.groupby(["dataset_name"]).annotation_level.value_counts() | |
| # %% [markdown] | |
# It seems snapshot safari expansion and SWG camera traps are not labeled at the image level, despite the indication in the spreadsheet...
| # | |
| # The NOAA one isn't here, but that's okay. Let's also take a look at [ENA24](https://lila.science/datasets/ena24detection). | |
| # | |
| # We'll subset to just the 7 identified, though we'll likely not continue with Snapshot Safari and SWG, since we want to make sure the test set labels are accurate. | |
| # %% | |
| datasets_of_interest = ["Desert Lion Conservation Camera Traps", | |
| "Island Conservation Camera Traps", | |
| "Ohio Small Animals", | |
| "Orinoquia Camera Traps", | |
| "SWG Camera Traps", | |
| "Snapshot Safari 2024 Expansion", | |
| "ENA24"] | |
| # %% | |
| reduced_df = df.loc[df["dataset_name"].isin(datasets_of_interest)].copy() | |
| reduced_df.head() | |
| # %% [markdown] | |
| # Observe that we also now get multiple URL options; `url_aws` will likely be best/fastest for use with [`distributed-downloader`](https://github.com/Imageomics/distributed-downloader) to get the images. | |
| # %% | |
| reduced_df.info(show_counts = True) | |
| # %% [markdown] | |
| # Let's remove empty frames to get a better sense of what we have. | |
| # %% | |
| df_cleaned = reduced_df.loc[reduced_df.original_label != "empty"].copy() | |
| df_cleaned.info(show_counts = True) | |
| # %% [markdown] | |
| # Not all have a scientific name, though those could be the non-taxa labels. | |
| # %% | |
| df_cleaned.loc[df_cleaned["scientific_name"].isna(), "original_label"].value_counts() | |
| # %% [markdown] | |
| # These are clearly also labels to remove, so we can simply reduce down to only those with non-null `scientific_name` values as well. | |
| # %% | |
| df_cleaned = df_cleaned.loc[~df_cleaned["scientific_name"].isna()].copy() | |
| df_cleaned.info(show_counts=True) | |
| # %% | |
| df_cleaned.nunique() | |
| # %% [markdown] | |
| # We have 368 unique `scientific_name` values, some of which were definitely just higher ranks (e.g., Aves), but there are 283 species, so somewhere between the two should be our biodiversity. | |
| # | |
| # Interesting also to note that there are duplicate URLs here; these would be the indicators of multiple species in an image as they correspond to the number of unique image IDs. Though, those could also be the by-sequence images that we expected to be by-image. | |
| # %% | |
| #double-check for humans | |
| df_cleaned.loc[df_cleaned.species == "homo sapien"] | |
| # %% [markdown] | |
| # ## Save the Reduced Data (no more "empty" labels) | |
| # %% | |
| df_cleaned.to_csv("../data/potential-test-sets/lila_image_urls_and_labels.csv", index = False) | |
| # %% | |
| print(df_cleaned.phylum.value_counts()) | |
| print() | |
| print(df_cleaned["class"].value_counts()) | |
| # %% [markdown] | |
| # All images are in Animalia, as expected; we have 2 phyla represented and 8 classes: | |
| # - Predominantly Chordata, and within that phylum, Mammalia is the vast majority, though aves is about 10%. | |
| # - Note that not every image with a phylum label has a class label. | |
# - Insecta, malacostraca, and arachnida are all classes in the phylum Arthropoda.
| # | |
| # ### Label Multi-Species Images | |
| # We'll go by both the URL and image ID, which do seem to correspond to the same images (for uniqueness). | |
| # %% | |
| df_cleaned["multi_species"] = df_cleaned.duplicated(subset = ["url_aws", "image_id"], keep = False) | |
| df_cleaned.loc[df_cleaned["multi_species"]].nunique() | |
| # %% [markdown] | |
| # We've got just under 63K images that have multiple species. We can figure out how many each of them have, and then move on to looking at images per sequence and other labeling info. | |
| # %% | |
| multi_sp_imgs = list(df_cleaned.loc[df_cleaned["multi_species"], "image_id"].unique()) | |
| # %% | |
| for img in multi_sp_imgs: | |
| df_cleaned.loc[df_cleaned["image_id"] == img, "num_species"] = df_cleaned.loc[df_cleaned["image_id"] == img].shape[0] | |
| df_cleaned.head() | |
| # %% [markdown] | |
| # Set all the non-multi species images to show 1 in the `num_species` column. | |
| # %% | |
| df_cleaned.loc[df_cleaned["num_species"].isna(), "num_species"] = 1.0 | |
| df_cleaned.num_species.value_counts() | |
| # %% | |
| df_cleaned.loc[df_cleaned["num_species"] == 14.0].sample(4) | |
| # %% [markdown] | |
| # Found a typo above with the human check... seems all taxa are lowercase, but let's make sure it's enough to catch them all | |
| # %% | |
| print("num homo sapiens: ", df_cleaned.loc[df_cleaned.species == "homo sapiens"].shape) | |
| df_cleaned.loc[df_cleaned["original_label"] == "human"].shape | |
| # %% [markdown] | |
# Did any of these factor into the multi-species counts?
| # %% | |
| df_cleaned.loc[(df_cleaned["species"] == "homo sapiens") & (df_cleaned["multi_species"])].shape | |
| # %% | |
| df_cleaned.loc[(df_cleaned["species"] == "homo sapiens") & (df_cleaned["multi_species"])].sample(4) | |
| # %% [markdown] | |
| # Let's fix those counts then. | |
| # %% | |
| human_multi_species = list(df_cleaned.loc[(df_cleaned["species"] == "homo sapiens") & (df_cleaned["multi_species"]), "image_id"].unique()) | |
| for img in human_multi_species: | |
| df_cleaned.loc[df_cleaned["image_id"] == img, "num_species"] = df_cleaned.loc[df_cleaned["image_id"] == img, "num_species"] - 1 | |
| df_cleaned.num_species.value_counts() | |
| # %% [markdown] | |
| # Actually remove human indicators | |
| # %% | |
| df_cleaned = df_cleaned.loc[df_cleaned["species"] != "homo sapiens"].copy() | |
| # %% [markdown] | |
| # Need to remove the images that have humans and other species too. | |
| # %% | |
| df_cleaned = df_cleaned.loc[~df_cleaned["image_id"].isin(human_multi_species)].copy() | |
| # %% [markdown] | |
| # #### Save this to CSV now we got those counts | |
| # %% | |
| df_cleaned.to_csv("../data/potential-test-sets/lila_image_urls_and_labels.csv", index = False) | |
| # %% [markdown] | |
| # ### Generate individual CSVs for the datasets | |
| # %% | |
| for dataset in datasets_of_interest: | |
| df_cleaned.loc[df_cleaned["dataset_name"] == dataset].to_csv(dataset+"_image_urls_and_labels.csv", index = False) | |
| # Manually moved these to the data/potential-test-sets/ directory and renamed to not have spaces in the filenames | |
| # (replaced spaces with underscores) | |
| # %% [markdown] | |
| # Get some basic stats | |
| # %% | |
| print(f"there are {df_cleaned.shape[0]} images") | |
| print(f"we have {df_cleaned['scientific_name'].nunique()} unique scientific names") | |
| print(f"when we filter for image-level labels, we have {df_cleaned.loc[df_cleaned['annotation_level'] == 'image', 'scientific_name'].nunique()} scientific names") | |
| # %% | |
| df_cleaned.loc[df_cleaned['annotation_level'] == 'image', 'num_species'].value_counts() | |
| # %% [markdown] | |
| # We will want to dedicate some more time to exploring some of these taxonomic counts, but we'll first look at the number of unique taxa (by Linnean 7-rank (`unique_7_tuple`)). We'll compare these to the number of unique scientific and common names, then perhaps add a count of number of creatures based on one of those labels. At that point we may save another copy of this CSV and start a new analysis notebook. | |
| # %% | |
| df_cleaned.annotation_level.value_counts() | |
| # %% [markdown] | |
| # Let's get a sense of total number of unique taxa, then separate out the by-image ones for unique taxa count there. Then we'll separate out each dataset into its own CSV for individual analysis. | |
| # %% [markdown] | |
| # ### Taxonomic String Exploration | |
| # %% | |
| lin_taxa = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'] | |
| # %% [markdown] | |
| # #### How many have all 7 Linnean ranks? | |
| # %% | |
| df_all_taxa = df_cleaned.dropna(subset = lin_taxa) | |
| df_all_taxa[lin_taxa].info(show_counts = True) | |
| # %% | |
| df_all_taxa_img = df_cleaned.loc[df_cleaned["annotation_level"] == "image"].dropna(subset = lin_taxa) | |
| df_all_taxa_img[lin_taxa].info(show_counts = True) | |
| # %% | |
| df_cleaned.loc[df_cleaned["annotation_level"] == "image"].shape | |
| # %% [markdown] | |
| # That's not too bad, considering some are definitely just common names or classes: 2,187,756 out of 2,867,312. | |
| # | |
| # 249,847 when we drop to just image-level annotations (out of 306,978). | |
| # | |
| # | |
| # Now how many different 7-tuples are there? | |
| # | |
| # #### How many unique 7-tuples? | |
| # %% | |
| #number of unique 7-tuples in full dataset | |
| df_cleaned['lin_duplicate'] = df_cleaned.duplicated(subset = lin_taxa, keep = 'first') | |
| df_unique_lin_taxa = df_cleaned.loc[~df_cleaned['lin_duplicate']].copy() | |
| print(f"unique taxa in all: {df_unique_lin_taxa.shape[0]}") | |
| print(f"unique taxa in image-level labeled: {df_unique_lin_taxa.loc[df_unique_lin_taxa["annotation_level"] == "image"].shape[0]}") | |
| # %% [markdown] | |
| # Pretty much aligns with the scientific name counts. | |
| # %% | |
| df_unique_lin_taxa.scientific_name.nunique() | |
| # %% | |
| df_unique_lin_taxa.loc[(df_unique_lin_taxa["scientific_name"].isna()) | (df_unique_lin_taxa["common_name"].isna())] | |
| # %% [markdown] | |
| # Let's check out our top ten labels, scientific names, and common names. Then we'll save this cleaned metadata file. | |
| # %% | |
| df_cleaned["original_label"].value_counts()[:10] | |
| # %% | |
| df_cleaned["scientific_name"].value_counts()[:10] | |
| # %% | |
| df_cleaned["common_name"].value_counts()[:10] | |
| # %% | |
| sns.histplot(df_cleaned, y = 'class') | |
| # %% | |
| sns.histplot(df_cleaned.loc[df_cleaned["class"].isin(["aves", "mammalia", "reptilia"])], y = 'order') | |
| # %% | |