from .sharded_model_v2 import ShardedModelV2
__all__ = ['ShardedModelV2'] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import os
from typing import Callable, Dict, List, Optional, Tuple
import torch
import torch.distributed as dist
from torch ... |
import torch
from colossalai.zero.sharded_model import ShardedModelV2
import copy
def col_model_deepcopy(sharded_model: ShardedModelV2, other_model: torch.nn.Module):
"""
    Copy the parameters of the ShardedModelV2 to other_model.
    Note that other_model must have the same structure as sharded_model.
"""
for zero_param, param in... |
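# A minimal sketch of what the truncated copy loop above likely does, assuming
# a plain (non-sharded) source for simplicity; the real helper must also gather
# any sharded payloads before copying. Names here are illustrative.
import torch

def copy_params_sketch(src_model: torch.nn.Module, dst_model: torch.nn.Module) -> None:
    # pairs parameters positionally, so both models must share the same
    # architecture and parameter registration order
    with torch.no_grad():
        for src_param, dst_param in zip(src_model.parameters(), dst_model.parameters()):
            dst_param.copy_(src_param.detach())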
import functools
from collections import OrderedDict
from typing import Any, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine.ophooks import register_ophooks_re... |
from typing import Any, Callable, List, Tuple
import torch
import torch.nn.functional as F
from typing import Union
from colossalai.zero.sharded_param.tensorful_state import StatefulTensor
def get_gradient_predivide_factor(world_size: int) -> float:
factor: int = 1
while world_size % factor == 0 and world_si... |
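# A sketch consistent with the visible prefix of get_gradient_predivide_factor:
# the factor doubles while it still divides world_size and stays below its
# co-factor, balancing pre-division against post-division of gradients.
def get_gradient_predivide_factor_sketch(world_size: int) -> float:
    factor: int = 1
    while world_size % factor == 0 and world_size / factor > factor:
        factor *= 2
    return float(factor)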
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .amp_type import AMP_TYPE
from colossalai.context import Config
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from .torch_amp import convert_to_torch_amp
from .apex_amp import convert_to_apex_amp
from .naive_amp impo... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from enum import Enum
class AMP_TYPE(Enum):
APEX = 'apex'
TORCH = 'torch'
NAIVE = 'naive'
|
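# How AMP_TYPE is typically consumed in a ColossalAI config (a sketch; the
# exact config schema depends on the installed version):
from colossalai.amp import AMP_TYPE

fp16 = dict(mode=AMP_TYPE.TORCH)    # select torch.cuda.amp as the mixed-precision backend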
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.distributed as dist
try:
import colossal_C
except ImportError:
    print('ColossalAI should be built with the CUDA extension to use the FP16 optimizer')
from torch.optim import Optimizer
from colossalai.core import global_context as gpc
from colossalai.co... |
import inspect
import torch.nn as nn
from torch.optim import Optimizer
from colossalai.utils import is_no_pp_or_last_stage
from .naive_amp import NaiveAMPOptimizer, NaiveAMPModel
from .grad_scaler import DynamicGradScaler, ConstantGradScaler
from ._fp16_optimizer import FP16Optimizer
def convert_to_naive_amp(model: n... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.distributed as dist
from torch import Tensor
from typing import Any
from torch.optim import Optimizer
from torch.distributed import ReduceOp
from colossalai.core import global_context as gpc
from colossalai.context import P... |
from typing import List
from torch import Tensor
def has_inf_or_nan(tensor):
try:
# if tensor is half, the .float() incurs an additional deep copy, but it's necessary if
        # PyTorch's .sum() creates a one-element tensor of the same type as tensor
# (which is true for some recent version of p... |
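# A sketch of the standard Apex-style overflow check the truncated body likely
# follows: reduce to a Python float, then test for inf/NaN.
import torch

def has_inf_or_nan_sketch(tensor: torch.Tensor) -> bool:
    try:
        # .float() avoids a half-precision .sum() returning a half tensor
        tensor_sum = float(tensor.float().sum())
    except RuntimeError as instance:
        # "value cannot be converted" signals an overflow during the cast
        if 'value cannot be converted' not in instance.args[0]:
            raise
        return True
    # NaN compares unequal to itself
    return tensor_sum in (float('inf'), -float('inf')) or tensor_sum != tensor_sum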
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .base_grad_scaler import BaseGradScaler
__all__ = ['ConstantGradScaler']
class ConstantGradScaler(BaseGradScaler):
def __init__(self, initial_scale: int, verbose: bool):
super().__init__(initial_scale, verbose)
self.log(f"Constant Gradient Sca... |
from .base_grad_scaler import BaseGradScaler
from .constant_grad_scaler import ConstantGradScaler
from .dynamic_grad_scaler import DynamicGradScaler
__all__ = ['BaseGradScaler', 'ConstantGradScaler', 'DynamicGradScaler']
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from abc import ABC, abstractmethod
from colossalai.logging import get_dist_logger
from torch import Tensor
from typing import Dict
__all__ = ['BaseGradScaler']
class BaseGradScaler(ABC):
def __init__(self, initial_scale: float, verbose: bool):
... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from .base_grad_scaler import BaseGradScaler
from typing import Optional
__all__ = ['DynamicGradScaler']
class DynamicGradScaler(BaseGradScaler):
def __init__(self,
initial_scale: float = 2**16,
growth_factor: float ... |
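# The dynamic loss-scale policy such a scaler usually implements (function
# name and state handling here are illustrative, not the class's API):
def update_scale_sketch(scale: float, found_overflow: bool, good_steps: int,
                        growth_factor: float = 2.0, backoff_factor: float = 0.5,
                        growth_interval: int = 1000):
    if found_overflow:
        # overflow: shrink the scale and reset the streak of good steps
        return scale * backoff_factor, 0
    good_steps += 1
    if good_steps % growth_interval == 0:
        # long enough without overflow: it is safe to grow the scale
        return scale * growth_factor, good_steps
    return scale, good_steps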
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# modified from https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py
# to support tensor parallel
import torch
from collections import defaultdict, abc
import warnings
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from ... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.nn as nn
import torch.cuda.amp as torch_amp
from torch import Tensor
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from ._grad_scaler import GradScaler
from colossalai.nn.optimizer import ColossalaiOptimizer
from colossalai.util... |
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from colossalai.context import Config
from .torch_amp import TorchAMPOptimizer, TorchAMPModel, TorchAMPLoss
from typing import Optional
def convert_to_torch_amp(model: nn.Module,
optimizer: Optimiz... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.nn as nn
try:
import apex.amp as apex_amp
except ImportError:
pass
from torch import Tensor
from colossalai.nn.optimizer import ColossalaiOptimizer
from colossalai.utils import clip_grad_norm_fp32
class ApexAMPOptimizer(ColossalaiOptimizer):
... |
from .apex_amp import ApexAMPOptimizer
import torch.nn as nn
from torch.optim import Optimizer
def convert_to_apex_amp(model: nn.Module, optimizer: Optimizer, amp_config):
r"""A helper function to wrap training components with Apex AMP modules
Args:
model (:class:`torch.nn.Module`): your model object... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from types import ModuleType
from typing import List
class Registry:
"""This is a registry class used to register classes and modules so that a universal
object builder can be enabled.
Args:
        name (str): The name of the registry.
third_par... |
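# A minimal sketch of the register/build pattern the docstring describes;
# register_module mirrors its decorator usage elsewhere in this dump, but
# the body is illustrative.
class MiniRegistry:
    def __init__(self, name: str):
        self.name = name
        self._registry = {}

    def register_module(self, module_class):
        # usable as a decorator: @LAYERS.register_module
        self._registry[module_class.__name__] = module_class
        return module_class

    def get_module(self, module_name: str):
        if module_name not in self._registry:
            raise KeyError(f'{module_name} is not registered in {self.name}')
        return self._registry[module_name]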
import torch.distributed.optim as dist_optim
import torch.nn as nn
import torch.optim as optim
import torchvision.models as tv_models
import torchvision.datasets as tv_datasets
from torchvision import transforms
from .registry import Registry
LAYERS = Registry("layers", third_party_library=[nn])
LOSSES = Registry("lo... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.distributed as dist
from torch.distributed import ReduceOp
from torch import Tensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
def all_gather(tensor: Tensor,
dim: int,
... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import List, Tuple, Union
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
from functools import reduce
i... |
from .collective import all_gather, reduce_scatter, all_reduce, broadcast, reduce
from .p2p import (send_forward, send_forward_recv_forward, send_backward_recv_forward, send_backward,
send_backward_recv_backward, send_forward_recv_backward, send_forward_backward_recv_forward_backward,
... |
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
def send_tensor_meta(tensor, need_meta=True, next_rank=None):
"""Sends tensor meta information before sending a... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device, synchronize
def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode):
"""... |
from .builder import (build_schedule, build_lr_scheduler, build_model,
build_optimizer, build_layer, build_loss, build_hooks,
build_dataset, build_transform, build_data_sampler,
build_gradient_handler, build_ophooks)
from .pipeline import build_pipeline_... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import inspect
from collections.abc import Iterable
from colossalai.registry import *
def build_from_config(module, config: dict):
"""Returns an object of :class:`module` constructed from `config`.
Args:
module: A python or user-defined class
... |
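# A sketch of what build_from_config plausibly does: filter the config down
# to the constructor's accepted keyword arguments, then instantiate.
import inspect

def build_from_config_sketch(module, config: dict):
    sig = inspect.signature(module.__init__)
    kwargs = {k: v for k, v in config.items() if k in sig.parameters}
    return module(**kwargs)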
import copy
import heapq
from colossalai.builder import build_model, build_layer
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
import torch.nn as nn
def _binary_partition(weights, st, ed):
"""Returns the ... |
from ._base_engine import Engine
from .gradient_handler import *
__all__ = ['Engine']
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from asyncio.log import logger
from typing import List, Iterable
from torch.nn import Module
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from colossalai.logging import get_dist_logger
from torch import Tensor
from colossalai.engine.ophooks ... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC, abstractmethod
import torch
from typing import Iterable, Callable
from colossalai.logging import get_dist_logger
from colossalai.utils import get_current_device
class BaseSchedule(ABC):
"""A basic helper class to control the process of traini... |
from ._base_schedule import BaseSchedule
from ._pipeline_schedule import PipelineSchedule, InterleavedPipelineSchedule, get_tensor_shape
from ._non_pipeline_schedule import NonPipelineSchedule
__all__ = ['BaseSchedule', 'NonPipelineSchedule', 'PipelineSchedule', 'InterleavedPipelineSchedule', 'get_tensor_shape']
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Iterable
import torch
from ._base_schedule import BaseSchedule
from colossalai.utils import conditional_context
class NonPipelineSchedule(BaseSchedule):
"""A helper schedule class for no pipeline parallelism running environment.
During one ... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import inspect
from typing import Callable, List, Tuple, Union
import colossalai.communication as comm
import torch.cuda
from colossalai.amp.naive_amp import NaiveAMPModel
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_conte... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC, abstractmethod
class BaseGradientHandler(ABC):
"""A basic helper class to handle all-reduce operations of gradients across different parallel groups
before optimization.
Args:
model (Module): Model where the gradients accumula... |
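# A minimal concrete handler sketched against this interface; the abstract
# method name handle_gradient is an assumption (only the docstring is
# visible above).
import torch.distributed as dist

class NaiveAllReduceHandlerSketch:
    def __init__(self, model, optimizer):
        self._model = model
        self._optimizer = optimizer

    def handle_gradient(self):
        # all-reduce every gradient across the default process group
        for param in self._model.parameters():
            if param.grad is not None:
                dist.all_reduce(param.grad)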
from ._base_gradient_handler import BaseGradientHandler
from ._data_parallel_gradient_handler import DataParallelGradientHandler
from ._zero_gradient_handler import ZeROGradientHandler
from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler
from ._pipeline_parallel_gradient_handler import Pipel... |
from colossalai.core import global_context as gpc
from colossalai.registry import GRADIENT_HANDLER
from ._base_gradient_handler import BaseGradientHandler
from ...context.parallel_mode import ParallelMode
from .utils import bucket_allreduce
@GRADIENT_HANDLER.register_module
class DataParallelGradientHandler(BaseGradi... |
#!/usr/bin/env python
from collections import defaultdict
import torch
import torch.distributed as dist
from colossalai.core import global_context as gpc
from colossalai.registry import GRADIENT_HANDLER
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ._base_gradient_handler import Base... |
import torch.distributed as dist
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from typing import Iterable
def bucket_allreduce(param_list: Iterable[nn.Parameter], group=None):
# get communication world size
comm_size = dist.get_world_size(group)
# bucketi... |
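# What the truncated bucketing body typically looks like (a sketch following
# the flatten/all-reduce/unflatten pattern the imports above point to):
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

def bucket_allreduce_sketch(param_list, group=None):
    comm_size = dist.get_world_size(group)
    # bucket gradients by dtype so each flattened tensor is homogeneous
    buckets = {}
    for param in param_list:
        if param.requires_grad and param.grad is not None:
            buckets.setdefault(param.grad.dtype, []).append(param.grad.data)
    # one all-reduce per bucket: flatten, average, copy back in place
    for dtype, grads in buckets.items():
        coalesced = _flatten_dense_tensors(grads)
        coalesced /= comm_size
        dist.all_reduce(coalesced, group=group)
        for grad, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
            grad.copy_(synced)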
from colossalai.core import global_context as gpc
from colossalai.registry import GRADIENT_HANDLER
from colossalai.utils.moe import get_moe_epsize_param_dict
from ._base_gradient_handler import BaseGradientHandler
from ...context.parallel_mode import ParallelMode
from .utils import bucket_allreduce
from colossalai.cont... |
from colossalai.registry import GRADIENT_HANDLER
from ._base_gradient_handler import BaseGradientHandler
@GRADIENT_HANDLER.register_module
class ZeROGradientHandler(BaseGradientHandler):
"""A helper class to handle all-reduce operations in a data parallel group.
    An all-reduce collective communication will be o... |
from colossalai.core import global_context as gpc
from colossalai.registry import GRADIENT_HANDLER
from ._base_gradient_handler import BaseGradientHandler
from ...context.parallel_mode import ParallelMode
from .utils import bucket_allreduce
@GRADIENT_HANDLER.register_module
class SequenceParallelGradientHandler(BaseG... |
from ._param_hookmgr import BaseParamHookMgr
__all__ = ["BaseParamHookMgr"]
|
from typing import Callable, List
import torch
import functools
class BaseParamHookMgr(object):
def __init__(self, param_list: List[torch.nn.Parameter]) -> None:
r"""
        Register a backward hook on every parameter of the module.
"""
self._param_list = param_list
self._hook_list = [... |
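# A sketch of how such a manager attaches and removes hooks; the method
# names register_backward_hooks/remove_hooks are assumptions, since only
# __init__ is visible above.
import functools
from typing import Callable, List
import torch

class ParamHookMgrSketch:
    def __init__(self, param_list: List[torch.nn.Parameter]) -> None:
        self._param_list = param_list
        self._hook_list = []

    def register_backward_hooks(self, hook_call: Callable) -> None:
        # Tensor.register_hook fires when a parameter's gradient is computed;
        # handles are kept so the hooks can be detached later
        for p in self._param_list:
            if p.requires_grad:
                handle = p.register_hook(functools.partial(hook_call, p))
                self._hook_list.append(handle)

    def remove_hooks(self) -> None:
        for handle in self._hook_list:
            handle.remove()
        self._hook_list = []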
from .utils import register_ophooks_recursively, BaseOpHook
from ._memtracer_ophook import MemTracerOpHook
__all__ = ["BaseOpHook", "MemTracerOpHook", "register_ophooks_recursively"]
|
import torch
from typing import List, Callable, Optional
from abc import ABC, abstractmethod
import torch
class BaseOpHook(ABC):
"""This class allows users to add customized operations
before and after the execution of a PyTorch submodule"""
def __init__(self):
pass
@abstractmethod
def ... |
import torch
from colossalai.registry import OPHOOKS
from . import BaseOpHook
@OPHOOKS.register_module
class ShardParamHook(BaseOpHook):
"""
    A hook to process the sharded param before and after the FWD and BWD operators execute.
"""
def __init__(self):
super().__init__()
def niter(self):
... |
import json
import pickle
from pathlib import Path
from colossalai.context.parallel_mode import ParallelMode
import torch
from colossalai.engine.ophooks import BaseOpHook
from colossalai.registry import OPHOOKS
from colossalai.logging import get_dist_logger
from colossalai.core import global_context as gpc
from typing ... |
import torch
from colossalai.registry import OPHOOKS
from . import BaseOpHook
@OPHOOKS.register_module
class ShardGradHook(BaseOpHook):
"""
    A hook to process the sharded param before and after the FWD and BWD operators execute.
"""
def __init__(self):
super().__init__()
def pre_fwd_exec(self,... |
import logging
from typing import List, Optional
from .logger import DistributedLogger
__all__ = ['get_dist_logger', 'DistributedLogger', 'disable_existing_loggers']
def get_dist_logger(name='colossalai'):
"""Get logger instance based on name. The DistributedLogger will create singleton instances,
which mea... |
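# Typical usage (a sketch; the ranks keyword is this logger's convention for
# restricting output to chosen processes, if the installed version supports it):
from colossalai.logging import get_dist_logger

logger = get_dist_logger()
logger.info('training started', ranks=[0])    # print on rank 0 only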
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import colossalai
import logging
from pathlib import Path
from typing import Union
import inspect
from colossalai.context.parallel_mode import ParallelMode
try:
from rich.logging import RichHandler
_FORMAT = 'colossalai - %(name)s - %(levelname)s: %(message)s'
... |
from ._trainer import Trainer
__all__ = ['Trainer']
|
from typing import Union, List
from colossalai.context.parallel_mode import ParallelMode
import torch
from torch import Tensor
from torch.utils.data import DataLoader
from tqdm import tqdm
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.logging import Distribute... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC, abstractmethod
from typing import Callable
import torch
import torch.distributed as dist
from colossalai.communication import all_reduce
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.re... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC
from torch import Tensor
class BaseHook(ABC):
"""This class allows users to add desired actions in specific time points
during training or evaluation.
:param priority: Priority in the printing, hooks with small priority will be printed... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import os.path as osp
import torch
from typing import List
from decimal import Decimal
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import HOOKS
from colossalai.logging import Distribute... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from colossalai.logging import get_dist_logger
from colossalai.registry import HOOKS
from colossalai.trainer.hooks import BaseHook
from colossalai.utils.checkpointing import save_checkpoint
from ._lr_scheduler_hook import LRSchedulerHook
@HOOKS.register_mo... |
from ._base_hook import BaseHook
from ._checkpoint_hook import SaveCheckpointHook
from ._log_hook import (LogMemoryByEpochHook, LogMetricByEpochHook, LogMetricByStepHook, LogTimingByEpochHook,
TensorboardHook)
from ._lr_scheduler_hook import LRSchedulerHook
from ._metric_hook import AccuracyHook... |
from colossalai.registry import HOOKS
from torch import Tensor
from ._metric_hook import LearningRateMetric, MetricHook
@HOOKS.register_module
class LRSchedulerHook(MetricHook):
r"""Build LR scheduler for trainer.
Args:
lr_scheduler (:class:`colossalai.nn.lr_scheduler`): The specific LR scheduler
... |
from colossalai.registry import HOOKS
from torch import Tensor
from colossalai.trainer.hooks import BaseHook
from colossalai.utils.memory_tracer import AsyncMemoryMonitor
@HOOKS.register_module
class MemTraceHook(BaseHook):
"""Save memory stats and pass it to states
This hook is used to record memory usage in... |
from .cuda_native import LayerNorm, FusedScaleMaskSoftmax, MultiHeadAttention
__all__ = ["LayerNorm", "FusedScaleMaskSoftmax", "MultiHeadAttention"]
|
import torch
JIT_OPTIONS_SET = False
def set_jit_fusion_options():
"""Set PyTorch JIT layer fusion options.
"""
    # LSG: the latest PyTorch and CUDA versions may not support
    # the following JIT settings
    global JIT_OPTIONS_SET
    if not JIT_OPTIONS_SET:
# flags required to enable jit f... |
from .option import set_jit_fusion_options
from .bias_dropout_add import bias_dropout_add_fused_train, bias_dropout_add_fused_inference
from .bias_gelu import bias_gelu_impl
__all__ = [
"bias_dropout_add_fused_train", "bias_dropout_add_fused_inference", "bias_gelu_impl",
"set_jit_fusion_options"
]
|
import torch
def bias_dropout_add(x, bias, residual, prob, training):
# type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
out = residual + out
return out
@torch.jit.script
def bias_dropout_add_fused_train(x: torch.Tensor,
... |
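# A sketch of the fused training variant: pinning training=True lets
# TorchScript specialize and fuse the dropout with the residual add.
import torch

@torch.jit.script
def bias_dropout_add_fused_train_sketch(x: torch.Tensor, bias: torch.Tensor,
                                        residual: torch.Tensor, prob: float) -> torch.Tensor:
    out = torch.nn.functional.dropout(x + bias, p=prob, training=True)
    return residual + out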
import torch
###### BIAS GELU FUSION/ NO AUTOGRAD ################
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is a tanh approximation of GELU
# the exact GELU is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + ... |
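# A sketch of the tanh-approximated body using the constants listed above
# (0.79788456 ~ sqrt(2/pi)); the exact erf-based GELU appears in the comment.
import torch

@torch.jit.script
def bias_gelu_sketch(bias: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    x = bias + y
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1.0 + 0.044715 * x * x)))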
import math
import importlib
from dataclasses import dataclass
import torch
from torch import nn
from torch.autograd import Function
def check_config(config):
if config.hidden_size % config.nhead != 0:
raise Exception("hidden_size % nhead != 0")
factor = 8 if config.fp16 else 4
upbound = factor ... |
from .layer_norm import MixedFusedLayerNorm as LayerNorm
from .scaled_softmax import FusedScaleMaskSoftmax
from .multihead_attention import MultiHeadAttention
|
"""This code from NVIDIA Megatron
with some changes. """
import torch
import torch.nn as nn
import enum
class AttnMaskType(enum.Enum):
padding = 1
causal = 2
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs following three operations in sequence
... |
"""This code is from NVIDIA apex:
https://github.com/NVIDIA/apex
with some changes. """
import numbers
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.cuda.amp import custom_fwd, custom_bwd
import importlib
global colossal_layer_norm_cuda
colossal_layer_norm_cuda = ... |
import dataclasses
import os
import random
import re
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Any, Mapping, NamedTuple, Text, Tuple, Union
from ml_collections import ConfigDict
import dill
import flax
import jax
import jax.numpy as jnp
import msgpac... |
import inspect
import logging
import os
import pprint
import random
import tempfile
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from socket import gethostname
import absl.flags
import absl.logging
import cloudpickle as pickle
import flax
import gcsfs
import jax
impor... |
import dataclasses
import pprint
import re
from functools import partial
import absl.app
import absl.flags
import flax
import jax
import jax.numpy as jnp
import numpy as np
import optax
from flax import linen as nn
from flax.jax_utils import prefetch_to_device
from flax.training.train_state import TrainState
from jax.... |
# coding=utf-8
# Copyright 2022 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# ... |
import dataclasses
import pprint
import re
from functools import partial
import absl.app
import absl.flags
import flax
import jax
import jax.numpy as jnp
import numpy as np
import optax
import wandb
from absl import logging
from flax import linen as nn
from flax.jax_utils import prefetch_to_device
from flax.training.t... |
# coding=utf-8
# Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
# Modifications copyright 2022 Xinyang Geng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.ap... |
import dataclasses
import pprint
import re
from functools import partial
import absl.app
import absl.flags
import flax
import jax
import jax.numpy as jnp
import numpy as np
import optax
import wandb
from absl import logging
from flax import linen as nn
from flax.jax_utils import prefetch_to_device
from flax.training.t... |
import copy
import pprint
import re
from functools import partial
import absl.app
import absl.flags
import flax
import jax
import jax.numpy as jnp
import numpy as np
import optax
from absl import logging
from flax import linen as nn
from flax.jax_utils import prefetch_to_device
from flax.training.train_state import Tr... |
import dataclasses
import os
import pprint
import re
from functools import partial
from threading import Lock
import absl.logging
import numpy as np
import wandb
from flask import Flask, request
from ml_collections import ConfigDict
from ml_collections.config_dict import config_dict
from tqdm import tqdm, trange
cla... |
import dataclasses
import json
import os
import pprint
import time
import urllib
from functools import partial
import absl.app
import absl.flags
import absl.logging
import numpy as np
import requests
import wandb
from flax.traverse_util import flatten_dict
from lm_eval import evaluator, tasks
from lm_eval.base import ... |
import dataclasses
import pprint
import random
import warnings
from collections import defaultdict
from functools import partial
from io import BytesIO
import gcsfs
import h5py
import numpy as np
import torch
from datasets import interleave_datasets, load_dataset
from ml_collections import ConfigDict
from ml_collectio... |
from coh.data.hf_data import HumanFeedbackDataset
from coh.data.pt_data import PretrainDataset
|
import dataclasses
import pprint
from functools import partial
from io import BytesIO
import gcsfs
import h5py
import numpy as np
from datasets import load_dataset
from ml_collections import ConfigDict
from ml_collections.config_dict import config_dict
from tqdm import tqdm, trange
class PretrainDataset(object):
... |
summary_template = [
("a good sumary is {pos}", "a bad sumary is {neg}"),
("the following is a good sumary {pos}", "the following is a bad sumary {neg}"),
("generate a good sumary: {pos}", "generate a bad sumary: {neg}"),
("good sumary: {pos}", "bad sumary: {neg}"),
("good: {pos}", "bad: {neg}"),
... |
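# How such paired templates are typically consumed: each tuple yields a
# positive and a negative prompt for the same example (values illustrative).
pos_template, neg_template = summary_template[0]
pos_text = pos_template.format(pos='<chosen summary>')
neg_text = neg_template.format(neg='<rejected summary>')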
# sb3_a2c.py
from stable_baselines3 import A2C
model = A2C(
"MlpPolicy", "CartPole-v0", verbose=0,
device="cpu", seed=1,
)
model.learn(total_timesteps=3000)
for name, param in model.policy.named_parameters():
if param.requires_grad:
layer_param_sum = round(param.data.sum().item(), 4)
print(... |
# sb3_ppo.py
import torch as th
from stable_baselines3 import PPO
model = PPO(
"MlpPolicy", "CartPole-v0", verbose=0,
device="cpu", seed=1,
policy_kwargs=dict(
optimizer_class=th.optim.RMSprop,
optimizer_kwargs=dict(
alpha=0.99, eps=1e-5, weight_decay=0,
),
), # mat... |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Root test configuration** (i.e., early-time configuration guaranteed to be
run by :mod:`pytest` *before* passed command-line argu... |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype installer.**
This submodule conforms to the standard :mod:`setuptools`-based "makefile"
format, instrumenting most hig... |
#!/usr/bin/env python3
# Torture test designed to find hotspots in @beartype at decoration time.
import yappi
from beartype import beartype
def run_beartype_decorator():
for _ in range(10000):
@beartype
def ugh(text: list[str]) -> int:
# def ugh(text: str) -> int:
return len(t... |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype.**
For :pep:`8` compliance, this namespace exposes a subset of the metadata
constants published by the :mod:`beartype.... |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype metadata.**
This submodule exports global constants synopsizing this package -- including
versioning and dependencies.... |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **object utilities** (i.e., supplementary low-level functions
handling arbitrary objects in a general-purpose manner)... |