import requests
import pandas as pd
from tqdm.auto import tqdm

import gradio as gr
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load

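# Render a model id as an HTML link to its repo page on the Hugging Face Hub.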
def make_clickable_model(model_name):
    link = "https://huggingface.co/" + model_name
    return f'<a style="text-decoration: underline; color: #1f3b54" target="_blank" href="{link}">{model_name}</a>'

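# Render a user id as an HTML link to the user's profile on the Hub.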
def make_clickable_user(user_id):
    link = "https://huggingface.co/" + user_id
    return f'<a style="text-decoration: underline; color: #1f3b54" target="_blank" href="{link}">{user_id}</a>'

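# List the ids of every model repo on the Hub tagged with the given RL environment.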
def get_model_ids(rl_env):
    api = HfApi()
    models = api.list_models(filter=rl_env)
    model_ids = [x.modelId for x in models]
    return model_ids

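# Download a model's README.md and parse the YAML metadata from its model card.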
def get_metadata(model_id):
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(readme_path)
    except requests.exceptions.HTTPError:
        # The repo has no README.md (404): skip this model.
        return None

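# Extract the first reported metric value from the card's model-index;
# for these agents it is a "mean_reward +/- std_reward" string.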
def parse_metrics_accuracy(meta):
    if "model-index" not in meta:
        return None
    results = meta["model-index"][0]["results"]
    metrics = results[0]["metrics"]
    accuracy = metrics[0]["value"]
    return accuracy

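# Split a "mean_reward +/- std_reward" string into two floats.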
def parse_rewards(accuracy):
    # Fall back to a sentinel of -1000 when the metric is missing or malformed.
    try:
        parsed = accuracy.split(' +/- ')
        mean_reward = float(parsed[0])
        std_reward = float(parsed[1])
    except (AttributeError, IndexError, ValueError):
        mean_reward = -1000
        std_reward = -1000
    return mean_reward, std_reward

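# Build one leaderboard row per model tagged with this environment.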
def get_data(rl_env):
    data = []
    model_ids = get_model_ids(rl_env)
    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        user_id = model_id.split('/')[0]
        row = {}
        row["User"] = user_id
        row["Model"] = model_id
        accuracy = parse_metrics_accuracy(meta)
        mean_reward, std_reward = parse_rewards(accuracy)
        # Models are ranked by the lower bound of their reward: mean - std.
        row["Results"] = mean_reward - std_reward
        row["Mean Reward"] = mean_reward
        row["Std Reward"] = std_reward
        data.append(row)
    return pd.DataFrame.from_records(data)

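# Fetch the leaderboard for one environment and render it as an HTML table.
# Returns a (html, dataframe, is_empty) triple.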
def get_data_per_env(rl_env):
    dataframe = get_data(rl_env)
    dataframe = dataframe.fillna("")

    if not dataframe.empty:
        # Turn ids into links, then sort by the lower-bound reward.
        dataframe["User"] = dataframe["User"].apply(make_clickable_user)
        dataframe["Model"] = dataframe["Model"].apply(make_clickable_model)
        dataframe = dataframe.sort_values(by=['Results'], ascending=False)
        table_html = dataframe.to_html(escape=False, index=False)
        table_html = table_html.replace("<table>", '<table style="width: 100%; margin: auto; border: 0.5px solid; border-spacing: 7px 0px">')
        table_html = table_html.replace("<thead>", '<thead align="left">')
        table_html = "<div style='text-align: left; width: 100%'>" + table_html + "</div>"
        return table_html, dataframe, dataframe.empty
    else:
        html = """<div style="color: green">
        <p> ⏳ Please wait. Results will be out soon... </p>
        </div>
        """
        return html, dataframe, dataframe.empty

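# Environments tracked by the leaderboard; each one's data is fetched once at startup.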
RL_ENVS = ['LunarLander-v2', 'CarRacing-v0', 'MountainCar-v0']

RL_DETAILS = {
    'CarRacing-v0': {'title': "The Car Racing 🏎️ Leaderboard 🏆", 'data': get_data_per_env('CarRacing-v0')},
    'MountainCar-v0': {'title': "The Mountain Car ⛰️ 🚗 Leaderboard 🏆", 'data': get_data_per_env('MountainCar-v0')},
    'LunarLander-v2': {'title': "The Lunar Lander 🚀 Leaderboard 🏆", 'data': get_data_per_env('LunarLander-v2')},
}

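# Header markdown for a leaderboard tab, shared between startup and reloads.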
def make_leaderboard_markdown(rl_env, dataframe, is_empty):
    if not is_empty:
        return """
# {name_leaderboard}

This is a leaderboard of **{len_dataframe}** agents playing {env_name} 👩‍🚀.

We use the lower bound to sort the models: mean_reward - std_reward.

You can click on a model's name to be redirected to its model card, which includes documentation.

Want to try your own model? Read [Unit 1](https://github.com/huggingface/deep-rl-class/blob/Unit1/unit1/README.md) of the Deep Reinforcement Learning Class.
""".format(len_dataframe=len(dataframe), env_name=rl_env, name_leaderboard=RL_DETAILS[rl_env]['title'])
    return """
# {name_leaderboard}
""".format(name_leaderboard=RL_DETAILS[rl_env]['title'])


# Re-fetch the data for an environment and rebuild its tab contents.
def reload_leaderboard(rl_env):
    RL_DETAILS[rl_env]['data'] = get_data_per_env(rl_env)
    data_html, data_dataframe, is_empty = RL_DETAILS[rl_env]['data']
    markdown = make_leaderboard_markdown(rl_env, data_dataframe, is_empty)
    return markdown, data_html

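# Build the Gradio UI: one tab per environment, each with its own reload button.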
block = gr.Blocks()
with block:
    with gr.Tabs():
        for rl_env in RL_ENVS:
            with gr.TabItem(rl_env):
                data_html, data_dataframe, is_empty = RL_DETAILS[rl_env]['data']
                markdown = make_leaderboard_markdown(rl_env, data_dataframe, is_empty)

                reload = gr.Button('Reload Leaderboard')
                # Event inputs must be Gradio components, not plain strings,
                # so the environment name is passed via a hidden Textbox.
                rl_env_input = gr.Textbox(value=rl_env, visible=False)
                output_markdown = gr.Markdown(markdown)
                output_html = gr.HTML(data_html)
                reload.click(reload_leaderboard, inputs=[rl_env_input], outputs=[output_markdown, output_html])

block.launch()