Column python_code: strings of length 0 to 258k.
"""Training algorithm track submission functions for LibriSpeech.""" from typing import Dict, Iterator, List, Tuple import numpy as np import torch import torch.distributed.nn as dist_nn from algorithmic_efficiency import spec from algorithmic_efficiency.pytorch_utils import pytorch_setup USE_PYTORCH_DDP = pytorch_s...
from typing import Dict, Iterator, List, Tuple

import numpy as np
import torch
import torch.distributed.nn as dist_nn

from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup

USE_PYTORCH_DDP = pytorch_setup()[0]


def get_batch_size(workload_name):
  batch_sizes = {'wmt'...
"""Training algorithm track submission functions for WMT.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax import jax.numpy as jnp import optax from algorithmic_efficiency import spec def get_batch_size(workload_name): batch_sizes = {'wmt': 128} return ba...
"""Training algorithm track submission functions for ImageNet.""" from typing import Dict, Iterator, List, Tuple import torch from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_scheduler import LinearLR from torch.optim.lr_scheduler import SequentialLR from algorithmic_efficiency import spec ...
"""Training algorithm track submission functions for ImageNet.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec def get_batch_size(workload_name): # Return the ...
"""Training algorithm track submission functions for LibriSpeech.""" import functools from typing import Dict, Iterator, List, Tuple from absl import logging from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import numpy as np import optax from algorithmic_efficiency import spec _GRAD...
"""Training algorithm track submission functions for LibriSpeech.""" from typing import Dict, Iterator, List, Tuple import numpy as np import torch import torch.distributed.nn as dist_nn from algorithmic_efficiency import spec from algorithmic_efficiency.pytorch_utils import pytorch_setup USE_PYTORCH_DDP = pytorch_s...
from typing import Dict, Iterator, List, Tuple

import torch
import torch.distributed.nn as dist_nn

from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup

USE_PYTORCH_DDP = pytorch_setup()[0]


def get_batch_size(workload_name):
  # Return the global batch size.
  batch...
from typing import Dict, Iterator, List, Tuple

from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax

from algorithmic_efficiency import spec


def get_batch_size(workload_name):
  # Return the global batch size.
  batch_sizes = {'ogbg': 2048}
  return batch_sizes[workload_name...
"""Training algorithm track submission functions for ImageNet.""" from typing import Dict, Iterator, List, Tuple import torch from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_scheduler import LinearLR from torch.optim.lr_scheduler import SequentialLR from algorithmic_efficiency import spec ...
"""Training algorithm track submission functions for ImageNet.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec def get_batch_size(workload_name): # Return the ...
from typing import Dict, Iterator, List, Tuple

import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR

from algorithmic_efficiency import spec


def get_batch_size(workload_name):
  batch_sizes = {'criteo1tb': ...
"""Training algorithm track submission functions for Criteo1TB DLRM-Small.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec def get_batch_size(workload_name): #...
"""Training algorithm track submission functions for FastMRI.""" from typing import Dict, Iterator, List, Tuple import torch from torch.optim.lr_scheduler import StepLR from algorithmic_efficiency import spec def get_batch_size(workload_name): # Return the global batch size. batch_sizes = {'fastmri': 8} retu...
"""Training algorithm track submission functions for FastMRI in Jax.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec def get_batch_size(workload_name): # Retur...
"""Training algorithm track submission functions for CIFAR10.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec def get_batch_size(workload_name): # Return the g...
"""Training algorithm track submission functions for CIFAR10.""" from typing import Dict, Iterator, List, Tuple import torch from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_scheduler import LinearLR from torch.optim.lr_scheduler import SequentialLR from algorithmic_efficiency import spec ...
"""Template submission module. See https://github.com/mlcommons/algorithmic-efficiency/blob/main/RULES.md#allowed-submissions and https://github.com/mlcommons/algorithmic-efficiency/blob/main/RULES.md#disallowed-submissions for guidelines. """ def init_optimizer_state(workload: spec.Workload, ...
import json
import os
import re

from absl import logging
import pandas as pd

TRIAL_LINE_REGEX = '(.*) --- Tuning run (\d+)/(\d+) ---'
METRICS_LINE_REGEX = '(.*) Metrics: ({.*})'
TRIAL_DIR_REGEX = 'trial_(\d+)'
MEASUREMENTS_FILENAME = 'eval_measurements.csv'

#### File IO helper functions ###


def get_logfile_paths(lo...
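To illustrate the regexes above: given a log line of the shape they expect (the sample line below is invented), the tuning-run index can be pulled out with re.match:

import re

TRIAL_LINE_REGEX = r'(.*) --- Tuning run (\d+)/(\d+) ---'

line = 'I0418 13:10:58 submission_runner.py:123] --- Tuning run 2/5 ---'
match = re.match(TRIAL_LINE_REGEX, line)
if match is not None:
  trial_index, num_trials = int(match.group(2)), int(match.group(3))
  print(trial_index, num_trials)  # 2 5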
from absl.testing import absltest

import scoring_utils

TEST_LOGFILE = 'test_data/adamw_fastmri_jax_04-18-2023-13-10-58.log'
TEST_DIR = 'test_data/experiment_dir'
NUM_EVALS = 18


class Test(absltest.TestCase):

  def test_get_trials_dict(self):
    trials_dict = scoring_utils.get_trials_dict(TEST_LOGFILE)
    self.ass...
"""Performance and scoring code. The three primary methods exposed by the `scoring` module are: - `compute_performance_profiles`: generates performance profiles for a set of submissions over all workloads as defined in the scoring rules: https://github.com/mlcommons/algorithmic-efficiency/blob/main/RULES.md - `com...
import os

from absl import app
from absl import flags
from absl import logging

import scoring_utils

from algorithmic_efficiency import workloads
import scoring

flags.DEFINE_string(
    'experiment_path',
    None,
    'Path to experiment directory containing workload directories.')
flags.DEFINE_string('submission_ta...
"""Submission file for a LAMB optimizer with warmup+cosine LR in PyTorch.""" import math from typing import Dict, Iterator, List, Tuple from absl import logging import torch from torch import Tensor from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_scheduler import LinearLR from torch.optim.l...
"""Submission file for a LAMB optimizer with warmup+cosine LR in Jax.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec _GRAD_CLIP_EPS = 1e-6 def scale_by_learnin...
"""Submission file for an NAdamW optimizer with warmup+cosine LR in PyTorch.""" import math from typing import Dict, Iterator, List, Tuple from absl import logging import torch from torch import Tensor import torch.distributed.nn as dist_nn from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_sc...
"""Submission file for an NAdamW optimizer with warmup+cosine LR in Jax.""" import functools # isort: off # We have to turn off isort here to resolve a conflict between isort and yapf. from typing import (Any, Callable, Dict, Iterator, Li...
"""Submission file for an AdamW optimizer with warmup+cosine LR in PyTorch.""" from typing import Dict, Iterator, List, Tuple from absl import logging import torch import torch.distributed.nn as dist_nn from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_scheduler import LinearLR from torch.opt...
"""Submission file for an AdamW optimizer with warmup+cosine LR in Jax.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec _GRAD_CLIP_EPS = 1e-6 def init_optimizer...
"""Submission file for Adafactor in PyTorch.""" from functools import partial from typing import Dict, Iterator, List, Tuple from absl import logging import torch import torch.distributed.nn as dist_nn from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_scheduler import LinearLR from torch.opti...
"""Submission file for an Adafactor optimizer with warmup+cosine LR in Jax.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec from baselines.adafactor.jax.sharded_ad...
# coding=utf-8
# Copyright 2023 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable la...
"""Submission file for a SAM optimizer with warmup+cosine LR in PyTorch.""" from typing import Callable, Dict, Iterator, List, Tuple from absl import logging import torch import torch.distributed.nn as dist_nn from torch.optim.lr_scheduler import CosineAnnealingLR from torch.optim.lr_scheduler import LinearLR from to...
"""Submission file for a SAM optimizer with warmup+cosine LR in Jax.""" import functools from typing import Dict, Iterator, List, Optional, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec _GRAD_CLIP_EPS = 1e-6 # Copied fr...
"""Submission file for a SGD with HeavyBall momentum optimizer in PyTorch.""" from typing import Callable, Dict, Iterator, List, Tuple from absl import logging import optax import torch import torch.distributed.nn as dist_nn from torch.optim.lr_scheduler import LambdaLR from algorithmic_efficiency import spec from a...
"""Submission file for a SGD with HeavyBall momentum optimizer in Jax.""" import functools from typing import Callable, Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec _GRAD_CLIP_EPS = 1e-6 def init_...
"""Submission file for a SGD with Nesterov momentum optimizer in PyTorch.""" from typing import Callable, Dict, Iterator, List, Tuple from absl import logging import optax import torch import torch.distributed.nn as dist_nn from torch.optim.lr_scheduler import LambdaLR from algorithmic_efficiency import spec from al...
"""Submission file for a SGD with Nesterov momentum optimizer in Jax.""" import functools from typing import Callable, Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec _GRAD_CLIP_EPS = 1e-6 def init_o...
"""Submission file for a Shampoo optimizer with warmup+cosine LR in Jax.""" import functools from typing import Dict, Iterator, List, Tuple from flax import jax_utils import jax from jax import lax import jax.numpy as jnp import optax from algorithmic_efficiency import spec from baselines.shampoo.jax.distributed_sha...
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab...
import argparse
import os

import yaml
from collections import OrderedDict

import cwrap_parser
import nn_parse
import native_parse
import preprocess_declarations
import function_wrapper
import copy_wrapper

from code_template import CodeTemplate

# This file is the top-level entry point for code generation in ATen.
#...
# this code should be common among cwrap and ATen preprocessing
# for now, I have put it in one place, but right now it is copied out of cwrap

from copy import deepcopy
from itertools import product


def parse_arguments(args):
    new_args = []
    for arg in args:
        # Simple arg declaration of form "<type> <name>"...
import re
from copy import deepcopy

from function_wrapper import TYPE_FORMAL_GENERIC
import common_with_cwrap

type_map = {
    'floating_point': [
        'Float',
        'Double',
        'Half',
    ],
    'integral': [
        'Byte',
        'Char',
        'Short',
        'Int',
        'Long'
    ],
}

all_typ...
import re

# match $identifier or ${identifier} and replace with value in env
# If this identifier is at the beginning of whitespace on a line
# and its value is a list then it is treated as
# block substitution by indenting to that depth and putting each element
# of the list on its own line
# if the identifier is on a...
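A toy re-implementation of just the scalar case the comments above describe; the real CodeTemplate also performs the list-valued block substitution with indentation, which this sketch omits:

import re

# Matches ${identifier} (group 1) or $identifier (group 2).
_SUBST = re.compile(r'\$\{(\w+)\}|\$(\w+)')


def substitute_scalars(template, env):
    def lookup(match):
        name = match.group(1) or match.group(2)
        return str(env[name])
    return _SUBST.sub(lookup, template)


print(substitute_scalars('return ${name}(self);', {'name': 'neg'}))
# return neg(self);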
import copy
import re

import common_with_cwrap
import yaml
from collections import OrderedDict, defaultdict

try:
    # use faster C loader if available
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader

# matches `name`, `params` in `name(params)`
NAME_PARAM_REGEX = r'(\w+)\((.*)\...
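Assuming the truncated pattern above ends as r'(\w+)\((.*)\)' (consistent with its comment), it splits a declaration into its name and parameter list; the sample declaration below is invented:

import re

NAME_PARAM_REGEX = r'(\w+)\((.*)\)'

match = re.match(NAME_PARAM_REGEX, 'add(Tensor self, Tensor other)')
name, params = match.groups()
print(name)    # add
print(params)  # Tensor self, Tensor other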
# HEY! Trying to understand what this file does? Read
# "what has to be done to add a Operation ..." first!

import re

from code_template import CodeTemplate

try:
    import typing  # noqa: F401
except ImportError:
    raise RuntimeError(
        'Missing build dependency: Unable to import the `typing` module. '
        ...
import yaml

# follows similar logic to cwrap, ignores !inc, and just looks for [[]]


def parse(filename):
    with open(filename, 'r') as file:
        declaration_lines = []
        declarations = []
        in_declaration = False
        for line in file.readlines():
            line = line.rstrip()
            if ...
from optparse import OptionParser

parser = OptionParser()
parser.add_option(
    '-o', '--output', help='where to write the result file.',
    action='store', default='.')
options, _ = parser.parse_args()

files = [
    # '../../csrc/cudnn/cuDNN.cwrap',
    '../../csrc/generic/TensorMethods.cwrap',
    # '../...
import re

import yaml

try:
    # use faster C loader if available
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader


def parse_default(s):
    if s.lower() == 'true':
        return True
    elif s.lower() == 'false':
        return False
    elif s == 'nullptr':
        return s
    ...
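Restating the visible branches so the snippet is self-contained (the final return is a placeholder for the truncated remainder), parse_default maps string literals to Python values:

def parse_default(s):
    if s.lower() == 'true':
        return True
    elif s.lower() == 'false':
        return False
    elif s == 'nullptr':
        return s
    return s  # placeholder: the real function continues past the truncation


assert parse_default('True') is True
assert parse_default('false') is False
assert parse_default('nullptr') == 'nullptr'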
from code_template import CodeTemplate
from function_wrapper import nested_dict

FILE = CodeTemplate("""\
#include "ATen/Config.h"
#include "TH/TH.h"
#if AT_CUDA_ENABLED()
#undef THNN_
#include "THC/THC.h"
#endif
#include "ATen/Utils.h"
${copy_includes}
namespace at {
${copy_functions}
}
""")

COPY = CodeTemplate("...
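How a template like FILE gets filled in, assuming the substitute(env) method from code_template above; both env values here are made-up examples, and list values expand one element per line per the block-substitution rules:

env = {
    'copy_includes': ['#include "ATen/SomeType.h"'],  # hypothetical include
    'copy_functions': ['// generated per-type copy functions'],
}
print(FILE.substitute(env))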