{ "cells": [ { "cell_type": "markdown", "id": "62acacf6", "metadata": {}, "source": [ "# Get Expanded Dataset\n", "\n", "In this notebook, we take the json-only dataset of models on HuggingFace (given in the document `ai_ecosystems_jsons.csv') and we produce an expanded csv dataset where the json elements are expanded into fields. This will give us the tabular dataset that we post online *without model cards*." ] }, { "cell_type": "code", "execution_count": null, "id": "0c7933f2", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "\n", "# Extract parent information from tags\n", "def extract_parents(tags):\n", " parent_list = []\n", " finetune_parents = []\n", " quantized_parents = []\n", " adapter_parents = []\n", " merge_parents = []\n", " for tag in tags:\n", " if tag.startswith(\"base_model:\") and tag.count(\":\") == 1:\n", " parent_list.append(tag[len(\"base_model:\"):])\n", " if tag.startswith(\"base_model:finetune:\"):\n", " finetune_parents.append(tag[len(\"base_model:finetune:\"):]) \n", " elif tag.startswith(\"base_model:quantized:\"):\n", " quantized_parents.append(tag[len(\"base_model:quantized:\"):])\n", " elif tag.startswith(\"base_model:adapter:\"):\n", " adapter_parents.append(tag[len(\"base_model:adapter:\"):])\n", " elif tag.startswith(\"base_model:merge:\"):\n", " merge_parents.append(tag[len(\"base_model:merge:\"):])\n", " return (parent_list, finetune_parents, quantized_parents, adapter_parents, merge_parents)\n", "\n", "# Extract tag information from tags\n", "def extract_languages(tags):\n", " languages = []\n", " for tag in tags:\n", " if len(str(tag))==2 and tag in pycountry.languages.get(alpha_2=tag).name:\n", " languages.append(tag)\n", " return languages" ] }, { "cell_type": "code", "execution_count": 4, "id": "d7a579df", "metadata": {}, "outputs": [], "source": [ "# Read the raw data\n", "raw_df = pd.read_csv(\"ai_ecosystem_jsons.csv\")\n", "\n", "# Convert the fullJson column to a pandas dataframe\n", "processed_df = pd.json_normalize(raw_df['fullJson'].apply(eval))" ] }, { "cell_type": "code", "execution_count": 5, "id": "b0ce22c8", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
_ididlikestrendingScoreprivatedownloadstagspipeline_taglibrary_namecreatedAtmodelId
0687060f05721fba56ca177a8moonshotai/Kimi-K2-Instruct479479.0False13356[transformers, safetensors, kimi_k2, text-gene...text-generationtransformers2025-07-11T00:55:12.000Zmoonshotai/Kimi-K2-Instruct
1685ffb0a9c4d599d2a98bc2cTHUDM/GLM-4.1V-9B-Thinking569367.0False33839[transformers, safetensors, glm4v, image-text-...image-text-to-texttransformers2025-06-28T14:24:10.000ZTHUDM/GLM-4.1V-9B-Thinking
2686ceee17e3b40a013a9afdcHuggingFaceTB/SmolLM3-3B351351.0False21863[transformers, safetensors, smollm3, text-gene...text-generationtransformers2025-07-08T10:11:45.000ZHuggingFaceTB/SmolLM3-3B
368378cef5cbef05290b4d045black-forest-labs/FLUX.1-Kontext-dev1568247.0False230863[diffusers, safetensors, image-generation, flu...image-to-imagediffusers2025-05-28T22:23:43.000Zblack-forest-labs/FLUX.1-Kontext-dev
46867e3f036e90a4761150310mistralai/Devstral-Small-2507155155.0False5090[vllm, safetensors, mistral, text2text-generat...text-generationvllm2025-07-04T14:23:44.000Zmistralai/Devstral-Small-2507
....................................
1860406687297c640b0f18259d1ea4aAmal17/NusaBERT-concate-BiGRU-NusaParagraph-emot00.0False0[license:apache-2.0, region:us]NaNNaN2025-07-12T17:13:42.000ZAmal17/NusaBERT-concate-BiGRU-NusaParagraph-emot
1860407687297d7a00511012546e84ejackrvn/bidirectional-dialect-translator00.0False0[transformers, safetensors, t5, text2text-gene...text-generationtransformers2025-07-12T17:13:59.000Zjackrvn/bidirectional-dialect-translator
1860408687297d89565c9eeb52b8391Amal17/NusaBERT-concate-BiGRU-NusaParagraph-topic00.0False0[license:apache-2.0, region:us]NaNNaN2025-07-12T17:14:00.000ZAmal17/NusaBERT-concate-BiGRU-NusaParagraph-topic
1860409687297df0a5c0185126e5a16ond-ai/ond-agent-1.3-8b-ckpt-100.0False0[region:us]NaNNaN2025-07-12T17:14:07.000Zond-ai/ond-agent-1.3-8b-ckpt-1
1860410687297e1ed57a961d8154fbbjackrvn/biderectional-dialect-translator00.0False0[transformers, arxiv:1910.09700, endpoints_com...NaNtransformers2025-07-12T17:14:09.000Zjackrvn/biderectional-dialect-translator
\n", "

1860411 rows × 11 columns

\n", "
" ], "text/plain": [ " _id \\\n", "0 687060f05721fba56ca177a8 \n", "1 685ffb0a9c4d599d2a98bc2c \n", "2 686ceee17e3b40a013a9afdc \n", "3 68378cef5cbef05290b4d045 \n", "4 6867e3f036e90a4761150310 \n", "... ... \n", "1860406 687297c640b0f18259d1ea4a \n", "1860407 687297d7a00511012546e84e \n", "1860408 687297d89565c9eeb52b8391 \n", "1860409 687297df0a5c0185126e5a16 \n", "1860410 687297e1ed57a961d8154fbb \n", "\n", " id likes \\\n", "0 moonshotai/Kimi-K2-Instruct 479 \n", "1 THUDM/GLM-4.1V-9B-Thinking 569 \n", "2 HuggingFaceTB/SmolLM3-3B 351 \n", "3 black-forest-labs/FLUX.1-Kontext-dev 1568 \n", "4 mistralai/Devstral-Small-2507 155 \n", "... ... ... \n", "1860406 Amal17/NusaBERT-concate-BiGRU-NusaParagraph-emot 0 \n", "1860407 jackrvn/bidirectional-dialect-translator 0 \n", "1860408 Amal17/NusaBERT-concate-BiGRU-NusaParagraph-topic 0 \n", "1860409 ond-ai/ond-agent-1.3-8b-ckpt-1 0 \n", "1860410 jackrvn/biderectional-dialect-translator 0 \n", "\n", " trendingScore private downloads \\\n", "0 479.0 False 13356 \n", "1 367.0 False 33839 \n", "2 351.0 False 21863 \n", "3 247.0 False 230863 \n", "4 155.0 False 5090 \n", "... ... ... ... \n", "1860406 0.0 False 0 \n", "1860407 0.0 False 0 \n", "1860408 0.0 False 0 \n", "1860409 0.0 False 0 \n", "1860410 0.0 False 0 \n", "\n", " tags \\\n", "0 [transformers, safetensors, kimi_k2, text-gene... \n", "1 [transformers, safetensors, glm4v, image-text-... \n", "2 [transformers, safetensors, smollm3, text-gene... \n", "3 [diffusers, safetensors, image-generation, flu... \n", "4 [vllm, safetensors, mistral, text2text-generat... \n", "... ... \n", "1860406 [license:apache-2.0, region:us] \n", "1860407 [transformers, safetensors, t5, text2text-gene... \n", "1860408 [license:apache-2.0, region:us] \n", "1860409 [region:us] \n", "1860410 [transformers, arxiv:1910.09700, endpoints_com... \n", "\n", " pipeline_tag library_name createdAt \\\n", "0 text-generation transformers 2025-07-11T00:55:12.000Z \n", "1 image-text-to-text transformers 2025-06-28T14:24:10.000Z \n", "2 text-generation transformers 2025-07-08T10:11:45.000Z \n", "3 image-to-image diffusers 2025-05-28T22:23:43.000Z \n", "4 text-generation vllm 2025-07-04T14:23:44.000Z \n", "... ... ... ... \n", "1860406 NaN NaN 2025-07-12T17:13:42.000Z \n", "1860407 text-generation transformers 2025-07-12T17:13:59.000Z \n", "1860408 NaN NaN 2025-07-12T17:14:00.000Z \n", "1860409 NaN NaN 2025-07-12T17:14:07.000Z \n", "1860410 NaN transformers 2025-07-12T17:14:09.000Z \n", "\n", " modelId \n", "0 moonshotai/Kimi-K2-Instruct \n", "1 THUDM/GLM-4.1V-9B-Thinking \n", "2 HuggingFaceTB/SmolLM3-3B \n", "3 black-forest-labs/FLUX.1-Kontext-dev \n", "4 mistralai/Devstral-Small-2507 \n", "... ... \n", "1860406 Amal17/NusaBERT-concate-BiGRU-NusaParagraph-emot \n", "1860407 jackrvn/bidirectional-dialect-translator \n", "1860408 Amal17/NusaBERT-concate-BiGRU-NusaParagraph-topic \n", "1860409 ond-ai/ond-agent-1.3-8b-ckpt-1 \n", "1860410 jackrvn/biderectional-dialect-translator \n", "\n", "[1860411 rows x 11 columns]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "processed_df" ] }, { "cell_type": "code", "execution_count": null, "id": "90a612e9", "metadata": {}, "outputs": [], "source": [ "# We'd like region, base_model, license, arxiv, dataset\n", "#processed_df['region_count'] = processed_df['tags'].apply(lambda x: x.count('region:'))\n", "processed_df['regions'] = processed_df['tags'].apply(lambda x: [tag.replace('region:', '') for tag in x if tag.startswith('region:')])\n", "#processed_df['region_count'] = processed_df['regions'].apply(lambda x: len(x))\n", "\n", "processed_df['licenses'] = processed_df['tags'].apply(lambda x: [tag.replace('license:', '') for tag in x if tag.startswith('license:')])\n", "#processed_df['license_count'] = processed_df['licenses'].apply(lambda x: len(x))\n", "\n", "processed_df['arxiv_papers'] = processed_df['tags'].apply(lambda x: [tag.replace('arxiv:', '') for tag in x if tag.startswith('arxiv:')])\n", "#processed_df['arxiv_count'] = processed_df['arxiv_papers'].apply(lambda x: len(x))\n", "\n", "processed_df['datasets'] = processed_df['tags'].apply(lambda x: [tag.replace('dataset:', '') for tag in x if tag.startswith('dataset:')])\n", "#processed_df['dataset_count'] = processed_df['datasets'].apply(lambda x: len(x))\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "534641f2", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
model_idlikestrendingScoreprivatedownloadstagspipeline_taglibrary_namecreatedAtregion_count...license_countarxiv_papersarxiv_countdatasetsdataset_countparent_modelfinetune_parentquantized_parentadapter_parentmerge_parent
0moonshotai/Kimi-K2-Instruct479479.0False13356[transformers, safetensors, kimi_k2, text-gene...text-generationtransformers2025-07-11T00:55:12.000Z1...1[]0[]0[][][][][]
1THUDM/GLM-4.1V-9B-Thinking569367.0False33839[transformers, safetensors, glm4v, image-text-...image-text-to-texttransformers2025-06-28T14:24:10.000Z1...1[2507.01006]1[]0[THUDM/GLM-4-9B-0414][THUDM/GLM-4-9B-0414][][][]
2HuggingFaceTB/SmolLM3-3B351351.0False21863[transformers, safetensors, smollm3, text-gene...text-generationtransformers2025-07-08T10:11:45.000Z1...1[]0[]0[][][][][]
3black-forest-labs/FLUX.1-Kontext-dev1568247.0False230863[diffusers, safetensors, image-generation, flu...image-to-imagediffusers2025-05-28T22:23:43.000Z1...1[2506.15742]1[]0[][][][][]
4mistralai/Devstral-Small-2507155155.0False5090[vllm, safetensors, mistral, text2text-generat...text-generationvllm2025-07-04T14:23:44.000Z1...1[]0[]0[mistralai/Mistral-Small-3.1-24B-Instruct-2503][mistralai/Mistral-Small-3.1-24B-Instruct-2503][][][]
\n", "

5 rows × 23 columns

\n", "
" ], "text/plain": [ " model_id likes trendingScore private \\\n", "0 moonshotai/Kimi-K2-Instruct 479 479.0 False \n", "1 THUDM/GLM-4.1V-9B-Thinking 569 367.0 False \n", "2 HuggingFaceTB/SmolLM3-3B 351 351.0 False \n", "3 black-forest-labs/FLUX.1-Kontext-dev 1568 247.0 False \n", "4 mistralai/Devstral-Small-2507 155 155.0 False \n", "\n", " downloads tags \\\n", "0 13356 [transformers, safetensors, kimi_k2, text-gene... \n", "1 33839 [transformers, safetensors, glm4v, image-text-... \n", "2 21863 [transformers, safetensors, smollm3, text-gene... \n", "3 230863 [diffusers, safetensors, image-generation, flu... \n", "4 5090 [vllm, safetensors, mistral, text2text-generat... \n", "\n", " pipeline_tag library_name createdAt region_count \\\n", "0 text-generation transformers 2025-07-11T00:55:12.000Z 1 \n", "1 image-text-to-text transformers 2025-06-28T14:24:10.000Z 1 \n", "2 text-generation transformers 2025-07-08T10:11:45.000Z 1 \n", "3 image-to-image diffusers 2025-05-28T22:23:43.000Z 1 \n", "4 text-generation vllm 2025-07-04T14:23:44.000Z 1 \n", "\n", " ... license_count arxiv_papers arxiv_count datasets dataset_count \\\n", "0 ... 1 [] 0 [] 0 \n", "1 ... 1 [2507.01006] 1 [] 0 \n", "2 ... 1 [] 0 [] 0 \n", "3 ... 1 [2506.15742] 1 [] 0 \n", "4 ... 1 [] 0 [] 0 \n", "\n", " parent_model \\\n", "0 [] \n", "1 [THUDM/GLM-4-9B-0414] \n", "2 [] \n", "3 [] \n", "4 [mistralai/Mistral-Small-3.1-24B-Instruct-2503] \n", "\n", " finetune_parent quantized_parent \\\n", "0 [] [] \n", "1 [THUDM/GLM-4-9B-0414] [] \n", "2 [] [] \n", "3 [] [] \n", "4 [mistralai/Mistral-Small-3.1-24B-Instruct-2503] [] \n", "\n", " adapter_parent merge_parent \n", "0 [] [] \n", "1 [] [] \n", "2 [] [] \n", "3 [] [] \n", "4 [] [] \n", "\n", "[5 rows x 23 columns]" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Append parent information to the dataset\n", "processed_df[['parent_model','finetune_parent', 'quantized_parent', 'adapter_parent', 'merge_parent']] = pd.DataFrame(\n", " processed_df['tags'].apply(extract_parents).tolist(), index=processed_df.index\n", ")\n", "\n", "# Drop the columns \"_id\" and \"modelId\" (the former is unneeded, the latter is redundant)\n", "processed_df.drop(columns=['_id', 'modelId'], inplace=True)\n", "\n", "# Rename the column \"id\" to \"model_id\"\n", "processed_df.rename(columns={'id': 'model_id'}, inplace=True)\n", "\n", "processed_df.head()" ] }, { "cell_type": "code", "execution_count": null, "id": "405a1f18", "metadata": {}, "outputs": [], "source": [ "import pycountry\n", "\n", "# Add the languages information\n", "processed_df['languages'] = processed_df['tags'].apply(extract_languages)\n", "\n", "for " ] }, { "cell_type": "code", "execution_count": 28, "id": "d63d9aee", "metadata": {}, "outputs": [ { "ename": "ModuleNotFoundError", "evalue": "No module named 'pycountry'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[28], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mpycountry\u001b[39;00m\n\u001b[1;32m 3\u001b[0m pycountry\u001b[38;5;241m.\u001b[39mlanguages\u001b[38;5;241m.\u001b[39mget(alpha_2\u001b[38;5;241m=\u001b[39mtag)\u001b[38;5;241m.\u001b[39mname\n", "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'pycountry'" ] } ], "source": [ "import pycountry\n", "\n", "pycountry.languages.get(alpha_2=tag).name" ] }, { "cell_type": "code", "execution_count": null, "id": "a97c5f78", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "e874cfe5", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "31c88ced", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "683a3159", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "1dbc6f23", "metadata": {}, "outputs": [], "source": [ "processed_df['license_count'] = processed_df['tags'].apply(lambda x: x.count('license:'))\n", "processed_df['license_list'] = processed_df['tags'].apply(lambda x: [tag.replace('license:', '') for tag in list(ast.literal_eval(x)) if tag.startswith('license:')])\n", "\n", "processed_df['arxiv_count'] = processed_df['tags'].apply(lambda x: x.count('arxiv:'))\n", "processed_df['arxiv_papers'] = processed_df['tags'].apply(lambda x: [tag.replace('arxiv:', '') for tag in list(ast.literal_eval(x)) if tag.startswith('arxiv:')])\n", "\n", "processed_df['dataset_count'] = processed_df['tags'].apply(lambda x: x.count('dataset:'))\n", "processed_df['dataset_list'] = processed_df['tags'].apply(lambda x: [tag.replace('dataset:', '') for tag in list(ast.literal_eval(x)) if tag.startswith('dataset:')])\n" ] }, { "cell_type": "code", "execution_count": null, "id": "4760cd6b", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "38701172", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "e6ab875b", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "6d57a559", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Processed 500000 rows\n", "Processed 1000000 rows\n", "Processed 1500000 rows\n" ] } ], "source": [ "import ast\n", "\n", "list_set_all_tag_categories = []\n", "set_all_tag_categories = set()\n", "prog = 0\n", "for index, row in processed_df.iterrows():\n", " for tag in row['tags']:\n", " if tag.count(\":\") > 0:\n", " category = tag.split(\":\")[0]\n", " list_set_all_tag_categories.append(category)\n", " if category not in set_all_tag_categories:\n", " set_all_tag_categories.add(category)\n", " #list_all_tags.append(category)\n", " prog += 1\n", " if prog % 100000 == 0:\n", " print(f\"Processed {prog} rows\")\n", "\n", "\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 13, "id": "38eaf55e", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[('region', 1860414),\n", " ('base_model', 1163751),\n", " ('license', 671895),\n", " ('arxiv', 416367),\n", " ('dataset', 229630),\n", " ('diffusers', 33337),\n", " ('template', 20595),\n", " ('loss', 6554),\n", " ('dataset_size', 5171),\n", " ('doi', 2702),\n", " ('BaseLM', 1512),\n", " ('adapterhub', 733),\n", " ('tags', 187),\n", " ('model', 186),\n", " ('repo_name', 186),\n", " ('file_name', 186),\n", " ('pruning_style', 186),\n", " ('community', 186),\n", " ('pruning_ratio', 186),\n", " ('dataset_label', 186),\n", " ('sparsity_ratio', 186),\n", " ('finetune', 186),\n", " ('modules_size', 186),\n", " ('modules', 186),\n", " ('rank', 186),\n", " ('anndata_version', 183),\n", " ('tissue', 180),\n", " ('modality', 118),\n", " ('model_cls_name', 107),\n", " ('annotated', 107),\n", " ('scvi_version', 105),\n", " ('python_version', 77),\n", " ('#', 33),\n", " ('https', 22),\n", " ('pipeline', 11),\n", " ('$', 9),\n", " ('benchmark', 7),\n", " ('arXiv', 7),\n", " ('Mi', 6),\n", " ('Voice', 6),\n", " ('version', 4),\n", " ('format', 4),\n", " ('library', 3),\n", " ('type', 3),\n", " ('Dramatical Murder Re', 3),\n", " ('sparsity‑2', 3),\n", " ('TikTok', 2),\n", " ('twitter', 2),\n", " ('inference', 2),\n", " ('3', 2),\n", " ('voice', 2),\n", " ('generated', 2),\n", " ('cs', 1),\n", " ('@', 1),\n", " ('*', 1),\n", " (' $', 1),\n", " ('http', 1),\n", " ('mytag', 1),\n", " ('Skill', 1),\n", " ('ai new new models. It has been generated using [this raw tempfor new models. It has been generated using [this raw template](https',\n", " 1),\n", " ('lr', 1),\n", " ('epochs', 1),\n", " ('lora-dropout', 1),\n", " ('train-batch', 1),\n", " ('optim', 1),\n", " ('weight-decay', 1),\n", " ('gradient_accumulation_steps', 1),\n", " ('lora-r', 1),\n", " ('lora-alpha', 1),\n", " ('dataset-size', 1),\n", " ('about', 1),\n", " ('', 1),\n", " ('pipeline_tag', 1),\n", " ('queued_at', 1),\n", " ('costPerHr', 1),\n", " ('gpu', 1),\n", " ('started_at', 1),\n", " ('started_training_at', 1),\n", " ('status', 1),\n", " ('completed_at', 1),\n", " ('Type-Count', 1),\n", " ('19', 1),\n", " ('- lora - peft - gemma - safesky-ai - ai-safety - sft - hh-rlhf - text-generation - transformers base_model',\n", " 1),\n", " ('volume', 1),\n", " ('adapterhub_tag', 1),\n", " ('datasets', 1)]" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Count frequency of values in list_set_all_tag_categories\n", "from collections import Counter\n", "\n", "# Count frequency of values in list_set_all_tag_categories\n", "tag_category_counts = Counter(list_set_all_tag_categories)\n", "\n", "# Display the most common tag categories\n", "tag_category_counts.most_common()" ] }, { "cell_type": "code", "execution_count": null, "id": "ee8c37fd", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.12" } }, "nbformat": 4, "nbformat_minor": 5 }