# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py:percent
#     text_representation:
#       extension: .py
#       format_name: percent
#       format_version: '1.3'
#       jupytext_version: 1.16.0
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---
# %%
import pandas as pd
import seaborn as sns

sns.set_style("whitegrid")
sns.set(rc = {'figure.figsize': (10, 10)})
# %% [markdown]
# Load in the full predicted catalog to ease the process.
# %%
df = pd.read_csv("../data/predicted-catalog.csv", low_memory = False)
# %%
df.head()
# %%
df.info(show_counts = True)
# %% [markdown]
# The `train_small` split duplicates entries from `train`, so we will drop those to analyze the full training set plus val.
# %% [markdown]
# `predicted-catalog` doesn't have `train_small`; hence, it's a smaller file.
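# %% [markdown]
# As a quick sanity check (assuming `predicted-catalog` has the same `split` column as `catalog`), we can confirm no `train_small` split appears here:
# %%
# Confirm there is no `train_small` split in predicted-catalog
# (assumes a `split` column like the one in catalog.csv)
df['split'].value_counts()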
# %% [markdown]
# Let's add a column indicating the original data source so we can also get some stats by data source, focusing on EOL, since we already know the licensing for BIOSCAN-1M and iNat21.
# %%
# Add data_source column for easier slicing
df.loc[df['inat21_filename'].notna(), 'data_source'] = 'iNat21'
df.loc[df['bioscan_filename'].notna(), 'data_source'] = 'BIOSCAN'
df.loc[df['eol_content_id'].notna(), 'data_source'] = 'EOL'
# %% [markdown]
# #### Get just EOL CSV for license addition
# %%
eol_df = df.loc[df['data_source'] == 'EOL']
# %%
eol_df.head()
# %% [markdown]
# We don't need the BIOSCAN or iNat21 columns, nor the taxa columns.
# %%
eol_license_cols = eol_df.columns[1:4]
eol_license_cols
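# %% [markdown]
# Positional selection is fragile if the column order ever changes; selecting by name would be more robust. A sketch, assuming these are the three columns at positions 1:4 (names inferred from how they're used later in this notebook):
# %%
# More robust alternative: select the license-relevant columns by name
# eol_license_cols = ['eol_content_id', 'eol_page_id', 'treeoflife_id']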
# %%
eol_license_df = eol_df[eol_license_cols]
#eol_license_df["license"] = None
# %%
eol_license_df.head()
# %%
#eol_license_df.to_csv("../data/eol_files/eol_licenses.csv", index = False)
# %% [markdown]
# ### Merge with Media Manifest to Check for Licenses
# Previous license files (retained below) are missing some files, so let's merge with the [media manifest](https://huggingface.co/datasets/imageomics/eol/blob/be7b7e6c372f6547e30030e9576d9cc638320099/data/interim/media_manifest.csv) from which all these images should have been downloaded to see if any here don't exist in it. From there we'll check licensing info.
# %%
media = pd.read_csv("../data/media_manifest (july 26).csv", dtype = {"EOL content ID": "int64", "EOL page ID": "int64"}, low_memory = False)
media.info(show_counts = True)
# %%
# Read eol license df back in with type int64 for ID columns
eol_license_df = pd.read_csv("../data/eol_files/eol_licenses.csv",
                             dtype = {"eol_content_id": "int64", "eol_page_id": "int64"},
                             low_memory = False)
# %%
eol_license_df.shape
# %%
eol_df = eol_df.astype({"eol_content_id": "int64", "eol_page_id": "int64"})
eol_df.info()
# %%
# Rebuild the license slice from eol_df now that its ID columns are int64
eol_license_df = eol_df[eol_license_cols]
# %% [markdown]
# Notice that we have about 300K more entries in the media manifest, which is about what we'd expect from the [comparison of predicted-catalog to the original full list](https://huggingface.co/datasets/imageomics/ToL-EDA/blob/main/notebooks/ToL_predicted-catalog_EDA.ipynb).
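# %% [markdown]
# We can compute that gap directly:
# %%
# Difference in row counts between the media manifest and our EOL slice
print(f"Media manifest has {len(media) - len(eol_license_df):,} more entries than the EOL slice.")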
# %%
media.rename(columns = {"EOL content ID": "eol_content_id"}, inplace = True)
# %%
eol_df_media = pd.merge(eol_license_df, media, how = "left", on = "eol_content_id")
# %%
eol_df_media.info(show_counts = True)
# %% [markdown]
# We have about 620K images missing a copyright owner.
# %%
eol_df_media.head()
# %%
eol_df_media.loc[eol_df_media["Copyright Owner"].isna()].nunique()
# %% [markdown]
# The missing info is distributed across 116,609 pages.
#
# There also seems to be a discrepancy in the number of page IDs between these. This led to duplicated records...definitely something's off.
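# %% [markdown]
# One way to confirm the duplication (a sketch, using `treeoflife_id` as the expected-unique key):
# %%
# Count rows whose treeoflife_id appears more than once after the merge;
# any nonzero count means the content-ID-only merge fanned out
eol_df_media['treeoflife_id'].duplicated().sum()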
# %% [markdown]
# Check consistency of merge when matching both `eol_content_id` and `eol_page_id`.
# %%
media.rename(columns = {"EOL page ID": "eol_page_id"}, inplace = True)
# %%
merge_cols = ["eol_content_id", "eol_page_id"]
# %%
eol_df_media_cp = pd.merge(eol_license_df, media, how = "inner", on = merge_cols)
eol_df_media_cp.info(show_counts = True)
# %% [markdown]
# Okay, so we do have a mismatch of about 113K images where the content IDs and page IDs don't both match.
# %%
eol_df_media_cp.to_csv("../data/eol_files/eol_cp_match_media.csv", index = False)
# %%
tol_ids_in_media = list(eol_df_media_cp.treeoflife_id)
tol_ids_in_media[:5]
# %%
eol_license_df.head()
# %% [markdown]
# Let's save a copy of the EOL rows whose content and page IDs are mismatched.
# %%
eol_df_missing_media = eol_license_df.loc[~eol_license_df.treeoflife_id.isin(tol_ids_in_media)]
eol_df_missing_media.info(show_counts = True)
# %%
eol_df_missing_media.to_csv("../data/eol_files/eol_cp_not_media.csv", index = False)
# %% [markdown]
# ### Save Record of Missing Content IDs & Compare to Older Media Manifest
# Let's save a record of the missing content IDs, then we'll compare them to the [July 6 media manifest](https://huggingface.co/datasets/imageomics/eol/blob/eaa00a48fa188f12906c5b8074d60aa8e67eb135/data/interim/media_manifest.csv) to see if any are in there. The July 6 media manifest is smaller, but we'll still check.
# %%
eol_missing_content_ids = eol_df_media.loc[eol_df_media["Medium Source URL"].isna()]
eol_missing_content_ids.head()
# %% [markdown]
# The pages exist (`eol.org/pages/<eol_page_id>`), but the content IDs do not (`eol.org/media/<eol_content_id>` produces 404).
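# %% [markdown]
# To spot-check a few by hand, we can build the media URLs directly (these should 404, as noted above):
# %%
# Print a few media URLs for manual spot-checking
for content_id in eol_missing_content_ids['eol_content_id'].head():
    print(f"https://eol.org/media/{int(content_id)}")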
# %%
#eol_missing_content_ids.to_csv("../data/eol_files/eol_missing_content_ids.csv", index = False)
# %%
media_old = pd.read_csv("../data/media_manifest.csv", dtype = {"EOL content ID": "int64", "EOL page ID": "int64"}, low_memory = False)
media_old.info(show_counts = True)
# %%
media_old.rename(columns = {"EOL content ID": "eol_content_id"}, inplace = True)
# %%
eol_df_media_old = pd.merge(eol_missing_content_ids[eol_license_cols], media_old, how = "left", on = "eol_content_id")
# %%
eol_df_media_old.info(show_counts = True)
# %% [markdown]
# No, we do not have any of the missing ones in the older media manifest.
# %% [markdown]
# ### Check how this compares to Catalog
# Let's see whether these are all images in TreeOfLife-10M, or a mix of it and Rare Species.
# %%
cat_df = pd.read_csv("../data/catalog.csv", low_memory = False)
# Remove duplicates in train_small
cat_df = cat_df.loc[cat_df.split != 'train_small']
# %%
# Add data_source column for easier slicing
cat_df.loc[cat_df['inat21_filename'].notna(), 'data_source'] = 'iNat21'
cat_df.loc[cat_df['bioscan_filename'].notna(), 'data_source'] = 'BIOSCAN'
cat_df.loc[cat_df['eol_content_id'].notna(), 'data_source'] = 'EOL'
# %%
eol_cat_df = cat_df.loc[cat_df.data_source == "EOL"]
# %%
eol_cat_df_media = pd.merge(eol_cat_df[eol_license_cols], media, how = "left", on = "eol_content_id")
eol_cat_df_media.info(show_counts = True)
# %% [markdown]
# Looks like the problem is distributed across both datasets.
# %%
eol_cat_df_media.loc[eol_cat_df_media["Medium Source URL"].isna()].nunique()
# %% [markdown]
# For `catalog`, the missing information is distributed across 9,634 pages, so that's 128 pages (of 400) in the Rare Species dataset that we can't currently match.
# %% [markdown]
# ### What are the taxa of the missing images?
#
# Let's bring back a version with the taxa and see what we're dealing with on that end without needing to open the pages.
# %%
cols_of_interest = ['treeoflife_id', 'eol_content_id', 'eol_page_id',
                    'kingdom', 'phylum', 'class', 'order', 'family',
                    'genus', 'species', 'common']
# %%
taxa_cols = ['kingdom', 'phylum', 'class', 'order', 'family',
             'genus', 'species', 'common']
# %%
eol_taxa_df_media = pd.merge(eol_df[cols_of_interest], media, how = "left", on = "eol_content_id")
# %%
eol_taxa_df_media.loc[eol_taxa_df_media["Medium Source URL"].isna()].nunique()
# %%
eol_taxa_df_media.loc[eol_taxa_df_media["Medium Source URL"].isna()].info(show_counts = True)
# %%
eol_taxa_df_media.loc[eol_taxa_df_media["Medium Source URL"].isna()].sample(7)
# %% [markdown]
# Save a copy of the missing content IDs with taxa info as well.
# %%
#eol_taxa_df_media.loc[eol_taxa_df_media["Medium Source URL"].isna()].to_csv("../data/eol_files/eol_taxa_missing_content_ids.csv", index = False)
# %% [markdown]
# And in `catalog`...
# %%
eol_cat_df_taxa_media = pd.merge(eol_cat_df[cols_of_interest], media, how = "left", on = "eol_content_id")
eol_cat_df_taxa_media.loc[eol_cat_df_taxa_media["Medium Source URL"].isna()].nunique()
# %% [markdown]
# Alright, so the missing images span 2 orders in Rare Species.
# %%
eol_cat_df_taxa_media.loc[eol_cat_df_taxa_media["Medium Source URL"].isna()].info(show_counts = True)
# %%
eol_cat_df_taxa_media.loc[eol_cat_df_taxa_media["Medium Source URL"].isna()].sample(4)
# %% [markdown]
# ## Compare Media Cargo
# The media cargo lists all images we downloaded from EOL on 29 July 2023, so it should match `predicted-catalog`.
# %%
cargo = pd.read_csv("../data/eol_media_cargo_names.csv", dtype = {"EOL content ID": "int64", "EOL page ID": "int64"})
cargo.info(show_counts = True)
# %%
cargo.nunique()
# %%
cargo.head()
# %%
cargo.rename(columns = {"EOL content ID": "eol_content_id"}, inplace = True)
eol_df_cargo = pd.merge(eol_license_df, cargo, how = "left", on = "eol_content_id")
# %%
eol_df_cargo.info(show_counts = True)
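# %% [markdown]
# A merge `indicator` makes it easy to isolate the unmatched rows (a sketch, re-running the same merge):
# %%
# Re-run the merge with an indicator to pull out rows with no cargo match
cargo_check = pd.merge(eol_license_df, cargo, how = "left", on = "eol_content_id", indicator = True)
cargo_check.loc[cargo_check["_merge"] == "left_only"].head()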
# %% [markdown]
# There seem to be 633 images here that aren't listed in the media cargo.
#
# What about in catalog?
# %%
eol_cat_cargo = pd.merge(eol_cat_df[eol_license_cols], cargo, how = "left", on = "eol_content_id")
eol_cat_cargo.info(show_counts = True)
# %% [markdown]
# Still missing 633 images...so we know they're not part of the Rare Species dataset, but are in TreeOfLife-10M...
# %%
media_in_cargo = pd.merge(cargo, media, how = "right", on = "eol_content_id")
media_in_cargo.info(show_counts = True)
# %% [markdown]
# But there are 26,868 images in the media manifest that are not in cargo (or at least the content IDs aren't), despite the media cargo having 154K more images listed.
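# %% [markdown]
# We can cross-check that with a set difference on the unique content IDs (this may differ slightly from the row count above if IDs repeat):
# %%
# Unique content IDs in the media manifest that are absent from the cargo list
print(len(set(media["eol_content_id"]) - set(cargo["eol_content_id"])))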
# %% [markdown]
# ## Compare to Newer Media Manifest
#
# We will load in a [new media manifest](https://huggingface.co/datasets/imageomics/eol/blob/3aa274067fc4a18877fb394b1d49a92962c57ed8/data/interim/media_manifest_Dec6.csv) (downloaded Dec. 6) to match up `page_id`s for the missing `content_id`s. This way we can download the images and compare via MD5 checksums to hopefully map the new `content_id`s to the old. (See [discussion #18](https://huggingface.co/datasets/imageomics/eol/discussions/18) in the [EOL Repo](https://huggingface.co/datasets/imageomics/eol).)
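# %% [markdown]
# A minimal sketch of the planned MD5 comparison; the directory paths and filename pattern below are hypothetical:
# %%
import hashlib
from pathlib import Path

def md5sum(path):
    """Return the MD5 hex digest of a file, read in chunks."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical usage: map newly downloaded images to old ones by checksum
# old_md5s = {md5sum(p): p.name for p in Path("../data/eol_images_old").glob("*.jpg")}
# for p in Path("../data/eol_images_new").glob("*.jpg"):
#     match = old_md5s.get(md5sum(p))
#     if match:
#         print(f"{p.name} -> {match}")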
# %%
media_new = pd.read_csv("../data/media_manifest_Dec6.csv", dtype = {"EOL content ID": "int64", "EOL page ID": "int64"}, low_memory = False)
media_new.info(show_counts = True)
# %%
media_new.head()
# %% [markdown]
# To allow for an easier sanity check on the matches, we'll use the version of the missing-info list with taxa included.
# %%
eol_taxa_df_missing_media = eol_taxa_df_media.loc[eol_taxa_df_media["Medium Source URL"].isna()]
eol_taxa_df_missing_media.head()
# %% [markdown]
# Rename `EOL content ID` and `EOL page ID` columns to match our `eol_taxa_df_missing_media` for easier merging.
# %%
media_new.rename(columns = {"EOL content ID": "eol_content_id", "EOL page ID": "eol_page_id"}, inplace = True)
# %% [markdown]
# First check for any matching content IDs.
# %%
eol_taxa_df_missing_media_new_check = pd.merge(eol_taxa_df_missing_media[cols_of_interest], media_new, how = "left", on = "eol_content_id")
eol_taxa_df_missing_media_new_check.info(show_counts = True)
# %% [markdown]
# Indeed, there are no matching content IDs here.
#
# Now let's match on page IDs to check that they are all still listed for download.
# %%
pg_ids_missing_content = set(eol_taxa_df_missing_media.eol_page_id)
pg_ids_media_new = set(media_new.eol_page_id)
print(f"There are {len(pg_ids_missing_content)} unique page ids that have missing content ids, and there are {len(pg_ids_media_new)} total page ids in the new media manifest.")
# %%
missing_pgs = []
for pg in pg_ids_missing_content:
    if pg not in pg_ids_media_new:
        missing_pgs.append(pg)
print(len(missing_pgs))
# %%
media.rename(columns = {"EOL page ID": "eol_page_id"}, inplace = True)
pg_ids_media = set(media.eol_page_id)
print(f"There are {len(pg_ids_media)} total page ids in the July 26 media manifest.")
missing_pgs_jul26 = []
for pg in pg_ids_missing_content:
    if pg not in pg_ids_media:
        missing_pgs_jul26.append(pg)
print(len(missing_pgs_jul26))
# %% [markdown]
# There seem to be 152 page IDs that don't match either manifest.
# %%
missing_pgs[:10]
# %%
# Why are these floats...does it matter?
missing_pgs_int = [int(pg) for pg in missing_pgs]
int_missing_pgs = []
for pg in missing_pgs_int:
    if pg not in pg_ids_media_new:
        int_missing_pgs.append(pg)
print(len(int_missing_pgs))
print(int_missing_pgs[:5])
# %% [markdown]
# It does not matter. There seem to be 152 missing pages; let's try making a couple into URLs, as in the quick check below.
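# %% [markdown]
# (A quick check; it uses only the `missing_pgs` list built above.)
# %%
# Build page URLs for a few of the missing page IDs to inspect by hand
for pg in missing_pgs[:3]:
    print(f"https://eol.org/pages/{int(pg)}")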
# %% [markdown]
# The first has a page (https://eol.org/pages/47186210) without images. Let's compare these 152 page IDs to our `catalog.csv` page IDs. Maybe these were not added because there were no images (still odd that they exist in `predicted-catalog.csv`, but not in the manifest).
# %%
cat_pgs = set(eol_cat_df.eol_page_id)
print(f"There are {len(cat_pgs)} unique page ids in the EOL subset of catalog.csv.")
missing_cat_pgs = []
for pg in missing_pgs:
    if pg not in cat_pgs:
        missing_cat_pgs.append(pg)
print(len(missing_cat_pgs))
# %% [markdown]
# Nope, these are all in `catalog.csv`.
#
# Another page with no images (https://eol.org/pages/47186225); [this one](https://eol.org/pages/46334362) has more data, but still no images. https://eol.org/pages/47186380 & https://eol.org/pages/47121005 also don't show any images.
#
# It seems the images for all of these were removed or moved to other pages...
#
# Let's make a CSV for the missing pages to check that we do indeed have the images (sanity check), and we can compare the taxa!
# %%
missing_pgs_df = eol_taxa_df_missing_media.loc[eol_taxa_df_missing_media["eol_page_id"].isin(missing_pgs)]
missing_pgs_df = missing_pgs_df[cols_of_interest]
missing_pgs_df.info()
# %%
missing_pgs_df.sample(10)
# %%
#missing_pgs_df.to_csv("../data/eol_files/catalog_missing_media_pages.csv", index = False)
# %% [markdown]
# ### Save File with EOL Page IDs & Number of Missing Content IDs Associated with Each
# %%
# Count and record the number of unique content IDs for each page ID
for pg_id in pg_ids_missing_content:
    pg_mask = eol_taxa_df_missing_media['eol_page_id'] == pg_id
    eol_taxa_df_missing_media.loc[pg_mask, "num_content_ids_missing"] = eol_taxa_df_missing_media.loc[pg_mask, 'eol_content_id'].nunique()
cols_of_interest.append("num_content_ids_missing")
eol_taxa_df_missing_media[cols_of_interest].head()
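# %% [markdown]
# An equivalent vectorized alternative to the loop above (a sketch; one `groupby` pass yields the same counts):
# %%
# Vectorized version of the per-page count (same counts, single pass):
# eol_taxa_df_missing_media["num_content_ids_missing"] = (
#     eol_taxa_df_missing_media.groupby("eol_page_id")["eol_content_id"].transform("nunique")
# )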
# %%
# Keep only the first row for each unique page_id
eol_taxa_df_missing_media['duplicate'] = eol_taxa_df_missing_media.duplicated(subset = "eol_page_id", keep = 'first')
eol_taxa_df_num_missing_pg = eol_taxa_df_missing_media.loc[~eol_taxa_df_missing_media['duplicate']]
eol_taxa_df_num_missing_pg.info()
# %% [markdown]
# This file has the relevant info on the number of missing content IDs per page ID; the content ID included is just the first instance for each page.
# %%
#eol_taxa_df_num_missing_pg[cols_of_interest].to_csv("../data/eol_files/eol_taxa_df_num_missing_pg.csv", index = False)
# %%
jul26_page_df = media.loc[media.eol_page_id.isin(pg_ids_missing_content)]
jul26_page_df.info()
# %%
jul26_page_df.nunique()
# %% [markdown]
# Yes, that's the expected number of unique page IDs. Let's save to CSV for download.
# %%
#jul26_page_df.to_csv("../data/eol_files/jul26_pages.csv", index = False)
# %%
dec6_page_df = media_new.loc[media_new.eol_page_id.isin(pg_ids_missing_content)]
dec6_page_df.info()
# %% [markdown]
# Okay, we have 5 more entries here, so let's compare unique counts and consider this one.
# %%
dec6_page_df.nunique()
# %%
#dec6_page_df.to_csv("../data/eol_files/dec6_pages.csv", index = False)
# %% [markdown]
# #### Check Older Media Manifest for Missing Pages
#
# Let's take a look at the July 6 media manifest to see if these pages are there.
# %%
media_old.rename(columns = {"EOL page ID": "eol_page_id"}, inplace = True)
pg_ids_media_old = set(media_old.eol_page_id)
print(f"There are {len(pg_ids_media_old)} total page ids in the July 6 media manifest.")
missing_pgs_jul6 = []
for pg in missing_pgs:
    if pg not in pg_ids_media_old:
        missing_pgs_jul6.append(pg)
print(len(missing_pgs_jul6))
# %% [markdown]
# It seems the missing pages are in the _older_ media manifest!
#
# Let's merge this with `missing_pgs_df` so we can get URLs to download from the pages there.
# %%
# Count and record the number of unique content IDs for each page ID
for pg_id in missing_pgs:
    mp_mask = missing_pgs_df['eol_page_id'] == pg_id
    missing_pgs_df.loc[mp_mask, "num_content_ids_missing"] = missing_pgs_df.loc[mp_mask, 'eol_content_id'].nunique()
missing_pgs_df.head()
# %%
# Keep only the first row for each unique page_id
missing_pgs_df['duplicate'] = missing_pgs_df.duplicated(subset = "eol_page_id", keep = 'first')
eol_taxa_num_missing_pgs_df = missing_pgs_df.loc[~missing_pgs_df['duplicate']]
eol_taxa_num_missing_pgs_df.info()
# %%
older_page_df = media_old.loc[media_old.eol_page_id.isin(missing_pgs)]
older_page_df.info()
# %%
# Spot-check a single page ID (compare numerically; the IDs are int64,
# so a string comparison against "4446364.0" would never match)
older_page_df.loc[older_page_df.eol_page_id == 4446364]
# %% [markdown]
# Looks good, let's save to CSV.
# %%
#older_page_df.to_csv("../data/eol_files/media_old_pages.csv", index = False)
# %% [markdown]
# ## Check EOL License file(s)
#
# First we'll look at `eol_licenses.csv` from Sam, which only covers `catalog.csv`, so we load both of these in to make sure we've got full coverage for all included images (Matt's first match attempt, from the file created above, couldn't find ~113K based on `eol_content_id`).
# %%
cat_df = pd.read_csv("../data/catalog.csv", dtype = {"eol_content_id": "int64", "eol_page_id": "int64"}, low_memory = False)
license_df = pd.read_csv("../data/eol_files/eol_licenses.csv",
                         dtype = {"eol_content_id": "int64", "eol_page_id": "int64"},
                         low_memory = False)
# %% [markdown]
# The `train_small` split duplicates entries from `train`, so we will drop those to analyze the full training set plus val.
# %%
cat_df = cat_df.loc[cat_df.split != 'train_small']
# %%
# Add data_source column for easier slicing
cat_df.loc[cat_df['inat21_filename'].notna(), 'data_source'] = 'iNat21'
cat_df.loc[cat_df['bioscan_filename'].notna(), 'data_source'] = 'BIOSCAN'
cat_df.loc[cat_df['eol_content_id'].notna(), 'data_source'] = 'EOL'
# %%
eol_df = cat_df.loc[cat_df.data_source == "EOL"]
# %%
license_df.head()
# %%
license_df.shape
# %%
eol_df.shape
# %% [markdown]
# Yeah, we're missing about 23K images in the license file.
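# %% [markdown]
# We can pinpoint which images lack a license row with an indicator merge (a sketch, matching on `eol_content_id` and assuming `catalog.csv` carries the `treeoflife_id` column used earlier):
# %%
# Find EOL images in catalog that have no row in the license file
license_check = pd.merge(eol_df[["treeoflife_id", "eol_content_id", "eol_page_id"]],
                         license_df, how = "left", on = "eol_content_id",
                         indicator = True, suffixes = ("", "_license"))
license_check.loc[license_check["_merge"] == "left_only"].shape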
# %%
license_df.info(show_counts = True)
# %%
license_df.loc[license_df["owner"].isna(), "license"].value_counts()
# %% [markdown]
# CC BY licenses without an `owner` indicated are rather problematic.
# %%
license_df.loc[license_df["owner"].isna()].sample(5)
# %% [markdown]
# Tracked down `eol_content_id` [14796160](https://eol.org/media/14796160); the original source is [BioImages](https://www.bioimages.org.uk/image.php?id=79950), with copyright held by Malcolm Storey, like 99% of the images on that site (see their [conditions of use](https://www.bioimages.org.uk/cright.htm)). He is listed as "compiler" on the EOL media page.
# %%
license_df.license.value_counts()
# %%
license_df.loc[license_df["license"] == "No known copyright restrictions"].sample(5)
# %%
#license_df.loc[license_df["owner"].isna()].to_csv("../data/eol_files/eol_licenses_missing_owner.csv", index = False)
# %%