| import os |
| import json |
| from huggingface_hub import HfApi |
| import glob |
| from datetime import datetime |
| from datasets import Dataset |
|
|
# Hugging Face auth: a write-capable token is required for the final
# push_to_hub call at the bottom of this script; None if the env var is
# unset (the snapshot download of a public repo still works without it).
TOKEN = os.environ.get("HF_WRITE_TOKEN")
API = HfApi(token=TOKEN)
# Dataset repo the raw benchmark results are read from.
REPO_ID = "AIEnergyScore/results_debug"
# Dataset repo the aggregated results are pushed to.
UPLOAD_REPO_ID = 'meg/HUGS_energy'


# Download a full local snapshot of the results dataset repo; returns the
# local cache directory that the collection loop below walks with glob.
output_directory = API.snapshot_download(repo_id=REPO_ID, repo_type='dataset')
print(output_directory)
| |
| |
# Walk the downloaded results tree (<task>/<org>/<model>/<timestamp>/...) and
# collect the benchmark report of the most recent run for every model.
dataset_results = []
for task in ['text_generation']:
    org_dirs = glob.glob(f"{output_directory}/{task}/*")
    print(org_dirs)
    for org_dir in org_dirs:
        org = org_dir.split("/")[-1]
        model_dirs = glob.glob(f"{org_dir}/*")
        print(model_dirs)
        for model_dir in model_dirs:
            model = model_dir.split("/")[-1]
            run_names = [run.split("/")[-1] for run in glob.glob(f"{model_dir}/*")]
            # Run directories are named with a timestamp; parse them so the
            # latest run is chosen chronologically rather than lexically.
            try:
                run_times = sorted(
                    datetime.strptime(name, '%Y-%m-%d-%H-%M-%S')
                    for name in run_names)
            except ValueError:
                # A directory whose name is not a timestamp: skip this model
                # entirely (mirrors the original best-effort behaviour).
                continue
            if not run_times:
                # No runs at all for this model; the original code would have
                # crashed here with an uncaught IndexError.
                continue
            last_date = run_times[-1].strftime('%Y-%m-%d-%H-%M-%S')
            most_recent_run = f"{model_dir}/{last_date}"
            print(most_recent_run)
            try:
                # Context manager guarantees the handle is closed (the
                # original leaked a file opened in the wrong mode, "rb+").
                with open(f"{most_recent_run}/benchmark_report.json") as report_file:
                    benchmark_report = json.load(report_file)
            except FileNotFoundError:
                # The run failed before producing a report; surface its log.
                with open(f"{most_recent_run}/error.log") as log_file:
                    print(log_file.read())
                continue
            except json.JSONDecodeError:
                # Malformed report. JSONDecodeError is a ValueError subclass,
                # so the original outer handler silently swallowed this too.
                continue
            print(benchmark_report)
            dataset_results.append({
                'task': task,
                'org': org,
                'model': model,
                'hardware': 'a10g-large',
                'date': last_date,
                # NOTE(review): fixed the 'efficency' misspelling in the
                # prefill record so all three sections share one schema;
                # confirm no downstream consumer relies on the old key.
                'prefill': {
                    'energy': benchmark_report['prefill']['energy'],
                    'efficiency': benchmark_report['prefill']['efficiency'],
                },
                'decode': {
                    'energy': benchmark_report['decode']['energy'],
                    'efficiency': benchmark_report['decode']['efficiency'],
                },
                'preprocess': {
                    'energy': benchmark_report['preprocess']['energy'],
                    'efficiency': benchmark_report['preprocess']['efficiency'],
                },
            })
|
|
# Build a datasets.Dataset from the collected per-model records and publish
# it to the upload repo; requires TOKEN to have write access to that repo.
hub_dataset_results = Dataset.from_list(dataset_results)
print(hub_dataset_results)
hub_dataset_results.push_to_hub(UPLOAD_REPO_ID, token=TOKEN)