python_code (string, lengths 0–229k) |
|---|
# flake8: noqa
# Mirrors torch/optim __init__ to allow for symmetric import structure
from .adadelta import AdadeltaConf
from .adagrad import AdagradConf
from .adam import AdamConf
from .adamw import AdamWConf
from .sparse_adam import SparseAdamConf
from .adamax import AdamaxConf
from .asgd import ASGDConf
from .sgd im... |
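A minimal sketch of how one of these generated conf dataclasses is typically consumed with Hydra, assuming each *Conf carries a _target_ that resolves to the matching torch.optim class; the AdamConf below is an illustrative stand-in for the generated one, with only a subset of its fields.
from dataclasses import dataclass
from typing import Any
import torch.nn as nn
from hydra.core.config_store import ConfigStore
from hydra.utils import instantiate
@dataclass
class AdamConf:
    # Stand-in mirroring the generated dataclass; the real one also exposes
    # betas, eps and amsgrad.
    _target_: str = "torch.optim.Adam"
    params: Any = None
    lr: float = 0.001
    weight_decay: float = 0.0
cs = ConfigStore.instance()
cs.store(group="optimizer", name="adam", node=AdamConf)  # symmetric with torch.optim names
model = nn.Linear(10, 2)
# In a Hydra app the config would come from composition; here the dataclass is
# instantiated directly and the model parameters are passed at call time.
optimizer = instantiate(AdamConf(lr=1e-3), params=model.parameters())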
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import A... |
import re
import functools
from copy import deepcopy
import torch
from torch.autograd import Variable
from torch import sparse
from torch import optim
from torch import nn
import torchcontrib.optim as contriboptim
from .common import TestCase, run_tests
from torch.utils import data
def rosenbrock(tensor):
x, y = ... |
import unittest
import torch
import torchcontrib
import torchcontrib.nn as contrib_nn
import torchcontrib.nn.functional as contrib_F
from torch.autograd import gradcheck, gradgradcheck
from .common import run_tests, TestCase
class TestNN(TestCase):
def assertGradAndGradgradChecks(self, apply_fn, inputs):
... |
# Pavel: copied without changes from pytorch/test/common.py
import sys
import os
import platform
import re
import gc
import types
import inspect
import argparse
import unittest
import warnings
import random
import contextlib
from functools import wraps
from itertools import product
from copy import deepcopy
from numbe... |
from . import nn
from . import optim
|
from .modules import *
from . import functional
|
def film(input, gamma, beta):
r"""Applies Feature-wise Linear Modulation to the incoming data.
See :class:`~torchcontrib.nn.FiLM` for details.
"""
if input.dim() < 2:
raise ValueError("film expects input to be at least 2-dimensional, but "
"got input of size {}".format(... |
import torch
from torch.nn import Module
from .. import functional as F
class FiLM(Module):
r"""Applies Feature-wise Linear Modulation to the incoming data as described
in the paper `FiLM: Visual Reasoning with a General Conditioning Layer`_ .
.. math::
y_{n,c,*} = \gamma_{n, c} * x_{n,c,*} + \b... |
from .linear import FiLM
__all__ = ['FiLM']
|
from collections import defaultdict
from itertools import chain
from torch.optim import Optimizer
import torch
import warnings
class SWA(Optimizer):
def __init__(self, optimizer, swa_start=None, swa_freq=None, swa_lr=None):
r"""Implements Stochastic Weight Averaging (SWA).
Stochastic Weight Avera... |
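A usage sketch of the averaging mode described in the SWA docstring, assuming the wrapper exposes swap_swa_sgd() to copy the averaged weights back into the model (as torchcontrib's SWA does); model, data and hyperparameters are illustrative.
import torch
import torch.nn.functional as F
import torchcontrib
model = torch.nn.Linear(10, 1)
base_opt = torch.optim.SGD(model.parameters(), lr=0.1)
# Auto mode: start averaging after step 10, update the average every 5 steps.
opt = torchcontrib.optim.SWA(base_opt, swa_start=10, swa_freq=5, swa_lr=0.05)
for step in range(100):
    x, y = torch.randn(32, 10), torch.randn(32, 1)
    opt.zero_grad()
    F.mse_loss(model(x), y).backward()
    opt.step()
opt.swap_swa_sgd()  # replace the model weights with the SWA running average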
from .swa import SWA
|
## @package process
# Module doxygen.process
# Script to insert preamble for doxygen and regen API docs
import glob, os, shutil
# Module caffe2...caffe2.python.control_test
def insert(originalfile,first_line,description):
with open(originalfile,'r') as f:
f1 = f.readline()
if(f1.find(first_line)<0... |
## @package publish
# Module doxygen.publish
import os, shutil
if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-c"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("/Users/aaronmarkham/caffe2/doxygen-c")
if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-... |
import os
import torch
from torch.utils.ffi import create_extension
this_file = os.path.dirname(__file__)
sources = ['src/my_lib.c']
headers = ['src/my_lib.h']
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['src/my_lib_cuda.c']
headers += ['src/my_l... |
import torch
import torch.nn as nn
from torch.autograd import Variable
from modules.add import MyAddModule
class MyNetwork(nn.Module):
def __init__(self):
super(MyNetwork, self).__init__()
self.add = MyAddModule()
def forward(self, input1, input2):
return self.add(input1, input2)
mod... |
# functions/add.py
import torch
from torch.autograd import Function
from _ext import my_lib
class MyAddFunction(Function):
def forward(self, input1, input2):
output = input1.new()
if not input1.is_cuda:
my_lib.my_lib_add_forward(input1, input2, output)
else:
my_lib.... |
from torch.nn.modules.module import Module
from functions.add import MyAddFunction
class MyAddModule(Module):
def forward(self, input1, input2):
return MyAddFunction()(input1, input2)
|
import os
import torch
from torch.utils.ffi import create_extension
this_file = os.path.dirname(__file__)
sources = ['my_package/src/my_lib.c']
headers = ['my_package/src/my_lib.h']
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['my_package/src/my_lib_c... |
#!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
import build
this_file = os.path.dirname(__file__)
setup(
name="my_package",
version="0.1",
description="An example project using PyTorch FFI",
url="https://github.com/pytorch/ffi-examples",
author="XYZ",
... |
import torch
import torch.nn as nn
from torch.autograd import Variable
from my_package.modules.add import MyAddModule
class MyNetwork(nn.Module):
def __init__(self):
super(MyNetwork, self).__init__()
self.add = MyAddModule()
def forward(self, input1, input2):
return self.add(input1, in... |
# functions/add.py
import torch
from torch.autograd import Function
from .._ext import my_lib
class MyAddFunction(Function):
def forward(self, input1, input2):
output = input1.new()
if not input1.is_cuda:
my_lib.my_lib_add_forward(input1, input2, output)
else:
my_li... |
from torch.nn.modules.module import Module
from ..functions.add import MyAddFunction
class MyAddModule(Module):
def forward(self, input1, input2):
return MyAddFunction()(input1, input2)
|
import torch
torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")
def compute(x, y, z):
x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
return x.matmul(y) + torch.relu(z)
inputs = [torch.randn(4, 8), torch.randn(8, 5), torch.randn(8, 5)]
trace = torch.jit.trace(compute... |
# Run `python setup.py build develop` before running this example!
import torch
torch.ops.load_library("warp_perspective.so")
print(torch.ops.my_ops.warp_perspective)
|
import torch
torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")
print(torch.ops.my_ops.warp_perspective(torch.randn(32, 32), torch.rand(3, 3)))
|
import torch
import torch.utils.cpp_extension
op_source = """
#include <opencv2/opencv.hpp>
#include <torch/script.h>
torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
cv::Mat image_mat(/*rows=*/image.size(0),
/*cols=*/image.size(1),
/*type=*/CV_32FC1,... |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name="warp_perspective",
ext_modules=[
CppExtension(
"warp_perspective",
["example_app/warp_perspective/op.cpp"],
libraries=["opencv_core", "opencv_imgproc"],
... |
import torch
import torch.utils.cpp_extension
torch.utils.cpp_extension.load(
name="warp_perspective",
sources=["example_app/warp_perspective/op.cpp"],
extra_ldflags=["-lopencv_core", "-lopencv_imgproc"],
is_python_module=False,
verbose=True
)
print(torch.ops.my_ops.warp_perspective)
|
import torch
torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")
@torch.jit.script
def compute(x, y):
if bool(x[0][0] == 42):
z = 5
else:
z = 10
x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
return x.matmul(y) + z
print(compute.graph)
print(c... |
from __future__ import division
from __future__ import print_function
import argparse
import math
import time
import torch
TIME_SCALES = {'s': 1, 'ms': 1000, 'us': 1000000}
parser = argparse.ArgumentParser()
parser.add_argument('example', choices=['py', 'cpp', 'cuda'])
parser.add_argument('-b', '--batch-size', type... |
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import torch
import python.lltm_baseline
import cpp.lltm
def check_equal(first, second, verbose):
if verbose:
print()
for i, (x, y) in enumerate(zip(first, second)):
x = x.cpu().detach().... |
from __future__ import division
from __future__ import print_function
import argparse
import torch
from torch.autograd import gradcheck
parser = argparse.ArgumentParser()
parser.add_argument('example', choices=['py', 'cpp', 'cuda'])
parser.add_argument('-b', '--batch-size', type=int, default=3)
parser.add_argument('-... |
import math
import torch
import torch.nn.functional as F
torch.manual_seed(42)
class LLTM(torch.nn.Module):
def __init__(self, input_features, state_size):
super(LLTM, self).__init__()
self.input_features = input_features
self.state_size = state_size
# 3 * state_size for input gat... |
import math
from torch import nn
from torch.autograd import Function
import torch
import torch.nn.functional as F
torch.manual_seed(42)
def d_sigmoid(z):
s = torch.sigmoid(z)
return (1 - s) * s
def d_tanh(z):
t = torch.tanh(z)
return 1 - (t * t)
def d_elu(z, alpha=1.0):
e = z.exp()
mask ... |
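A quick numeric sanity check for the hand-written derivative helpers above, comparing them against autograd on a random tensor (the helpers are redefined here so the check runs standalone).
import torch
def d_sigmoid(z):
    s = torch.sigmoid(z)
    return (1 - s) * s
def d_tanh(z):
    t = torch.tanh(z)
    return 1 - (t * t)
z = torch.randn(8, requires_grad=True)
# The gradient of sum(f(z)) w.r.t. z is the elementwise derivative of f.
auto_dsig, = torch.autograd.grad(torch.sigmoid(z).sum(), z)
auto_dtanh, = torch.autograd.grad(torch.tanh(z).sum(), z)
print(torch.allclose(auto_dsig, d_sigmoid(z)))   # True
print(torch.allclose(auto_dtanh, d_tanh(z)))     # True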
from torch.utils.cpp_extension import load
lltm_cuda = load(
'lltm_cuda', ['lltm_cuda.cpp', 'lltm_cuda_kernel.cu'], verbose=True)
help(lltm_cuda)
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='lltm_cuda',
ext_modules=[
CUDAExtension('lltm_cuda', [
'lltm_cuda.cpp',
'lltm_cuda_kernel.cu',
]),
],
cmdclass={
'build_ext': BuildExtension
... |
import math
from torch import nn
from torch.autograd import Function
import torch
import lltm_cuda
torch.manual_seed(42)
class LLTMFunction(Function):
@staticmethod
def forward(ctx, input, weights, bias, old_h, old_cell):
outputs = lltm_cuda.forward(input, weights, bias, old_h, old_cell)
new... |
from torch.utils.cpp_extension import load
lltm_cpp = load(name="lltm_cpp", sources=["lltm.cpp"], verbose=True)
help(lltm_cpp)
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name='lltm_cpp',
ext_modules=[
CppExtension('lltm_cpp', ['lltm.cpp']),
],
cmdclass={
'build_ext': BuildExtension
})
|
import math
from torch import nn
from torch.autograd import Function
import torch
import lltm_cpp
torch.manual_seed(42)
class LLTMFunction(Function):
@staticmethod
def forward(ctx, input, weights, bias, old_h, old_cell):
outputs = lltm_cpp.forward(input, weights, bias, old_h, old_cell)
new_h... |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__(... |
###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model.
#
###############################################################################
import argparse
import torch
import data
parser = a... |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__... |
# coding: utf-8
import argparse
import time
import math
import os
import torch
import torch.nn as nn
import torch.onnx
import data
import model
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM/GRU/Transformer Language Model')
parser.add_argument('--data', type=str, default='./data/wikitext-2'... |
import os
from io import open
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
... |
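A small usage sketch of the Dictionary above (redefined here so the snippet runs standalone); the upstream class builds the word-to-index vocabulary for the Wikitext-2 corpus.
class Dictionary(object):
    def __init__(self):
        self.word2idx = {}
        self.idx2word = []
    def add_word(self, word):
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
d = Dictionary()
for w in "the quick brown fox jumps over the lazy dog the".split():
    d.add_word(w)
print(len(d.idx2word))                 # 8 unique words
print(d.word2idx["the"])               # 0 -- first word seen gets index 0
print(d.idx2word[d.word2idx["fox"]])   # fox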
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision
from torchvision.transforms import Compose, ToTensor, Resize
from torch import optim
import numpy as np
from torch.hub import tqdm
class PatchExtractor(nn.Module):
def __init__(self, patch_size=16):
... |
import os
import time
import requests
import tarfile
import numpy as np
import argparse
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam
################################
### GAT LAYER DEFINITION ###
################################
class GraphAttentionLayer(nn.Modul... |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init_... |
import argparse
import torch
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import parallelize_module
from utils import cleanup, setup, ToyModel
try:
from torch.distributed.tensor.parallel import (
SequenceParallel
)
SP_A... |
import argparse
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
def setup(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# initialize the process group
dist.init_process_group("nccl", ran... |
import argparse
import torch
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from utils import cleanup, setup, ToyModel
"""
This is the script to test Tensor Parallel(TP) on a toy model in a
Megatr... |
import argparse
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
)
from tor... |
import argparse
import os
import sys
import tempfile
from urllib.parse import urlparse
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel... |
import os
import tempfile
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_POR... |
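A sketch of the pieces that typically accompany the setup() above in the DDP tutorial: a matching cleanup(), a small worker, and an mp.spawn launcher (backend, model and world size are illustrative; the original snippet is truncated).
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
def cleanup():
    dist.destroy_process_group()
def demo_basic(rank, world_size):
    setup(rank, world_size)
    model = DDP(nn.Linear(10, 10))          # CPU/gloo variant for illustration
    optimizer = optim.SGD(model.parameters(), lr=0.001)
    optimizer.zero_grad()
    model(torch.randn(20, 10)).sum().backward()
    optimizer.step()
    cleanup()
if __name__ == "__main__":
    world_size = 2
    mp.spawn(demo_basic, args=(world_size,), nprocs=world_size, join=True)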
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from transformers import AutoTokenizer, GPT2TokenizerFast
from transformers import T5Tokenizer, T5ForConditionalGeneration
import functools
from torch.optim.lr_scheduler import StepLR
import torch.nn... |
import argparse
import glob
import os
import json
import time
import logging
import random
import re
from itertools import chain
from string import punctuation
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset, load_metric
from tra... |
import os
import torch
import torch.distributed as dist
from datetime import datetime
import tqdm
from transformers import AutoTokenizer, GPT2TokenizerFast
from transformers import T5Tokenizer, T5ForConditionalGeneration
g_gigabyte = 1024**3
def setup():
# initialize the process group
dist.init_process_group(... |
from .environment import bfloat_support
from .train_utils import setup, cleanup, get_date_of_run, format_metrics_to_gb, train, validation,setup_model
|
# Copyright (c) 2022 Meta Platforms, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the Apache-style license found in the
# LICENSE file in the root directory of this source tree.
# This is a simple check to confirm that your current server has full bfloat support -
# both GPU ... |
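The original check is truncated above; a rough, hedged approximation using only public torch APIs might look like the following (the upstream version also verifies the NCCL version needed for bf16 communication).
import torch
def bfloat_support_sketch():
    # Approximation: CUDA 11+ build and a GPU that reports bf16 support
    # (Ampere or newer); not a substitute for the full upstream check.
    return (
        torch.cuda.is_available()
        and torch.version.cuda is not None
        and int(torch.version.cuda.split(".")[0]) >= 11
        and torch.cuda.is_bf16_supported()
    )
if __name__ == "__main__":
    print("bf16 ready:", bfloat_support_sketch())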
import torch
import os
import torch.distributed as dist
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
apply_activation_checkpointing,
)
from transformers.models.t5.modeling_t5 import T5Block
from functools import partial
non_reentrant_wrappe... |
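A sketch of how these imports are typically combined; the truncated non_reentrant_wrapper above is assumed to be a functools.partial of checkpoint_wrapper with non-reentrant checkpointing, and the block class is parameterized here (in the T5 example it would be T5Block).
import functools
import torch.nn as nn
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    checkpoint_wrapper,
    CheckpointImpl,
    apply_activation_checkpointing,
)
# Assumed shape of the truncated wrapper above.
non_reentrant_wrapper = functools.partial(
    checkpoint_wrapper,
    checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
def apply_fsdp_checkpointing(model, block_cls=nn.TransformerEncoderLayer):
    # Wrap every submodule of the given class with activation checkpointing.
    apply_activation_checkpointing(
        model,
        checkpoint_wrapper_fn=non_reentrant_wrapper,
        check_fn=lambda submodule: isinstance(submodule, block_cls),
    )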
import torch
from torch.distributed.fsdp import (
# FullyShardedDataParallel as FSDP,
# CPUOffload,
MixedPrecision,
# BackwardPrefetch,
# ShardingStrategy,
)
# requires grad scaler in main loop
fpSixteen = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
r... |
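A sketch of how a MixedPrecision policy like fpSixteen is handed to FSDP; the bf16 variant below and the toy model are illustrative, and the call assumes an already-initialized process group.
import torch
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
bfSixteen = MixedPrecision(
    param_dtype=torch.bfloat16,
    reduce_dtype=torch.bfloat16,   # gradient communication precision
    buffer_dtype=torch.bfloat16,   # buffer precision
)
def shard_model(model: nn.Module) -> FSDP:
    # fp16 policies additionally need a grad scaler in the training loop,
    # as the comment above notes; bf16 does not.
    return FSDP(model, mixed_precision=bfSixteen)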
from .mixed_precision import *
from .wrapping import *
from .activation_checkpointing_functions import apply_fsdp_checkpointing
|
# holds various wrapping policies for fsdp
import torch.distributed as dist
import torch.nn as nn
import torch
from transformers.models.t5.modeling_t5 import T5Block
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
CPUOffload,
BackwardPrefetch,
Mixed... |
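A sketch of the kind of policy such a module usually provides: a transformer auto-wrap policy built with the generic helper from torch.distributed.fsdp.wrap; the block class is parameterized here (the T5 example would pass transformers' T5Block).
import functools
import torch.nn as nn
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
def get_transformer_wrapping_policy(block_cls=nn.TransformerEncoderLayer):
    # Each submodule of block_cls becomes its own FSDP unit.
    return functools.partial(
        transformer_auto_wrap_policy,
        transformer_layer_cls={block_cls},
    )
# Later: FSDP(model, auto_wrap_policy=get_transformer_wrapping_policy())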
from pathlib import Path
from datetime import datetime
import torch
import time
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
StateDictType,
FullStateDictConfig, # general model non-sharded, non-flattened params
LocalStateDictConfig, # flattened params, usable only by FSDP
... |
from .checkpoint_handler import (
load_model_checkpoint,
save_model_checkpoint,
save_distributed_model_checkpoint,
load_distributed_model_checkpoint,
load_optimizer_checkpoint,
save_optimizer_checkpoint,
save_model_and_optimizer_sharded,
load_model_sharded,
)
|
from dataclasses import dataclass, field
from typing import ClassVar
from torch.distributed.fsdp import ShardingStrategy
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
@dataclass
class fsdp_config:
mixed_precision: bool=True
use_fp16: bool=False
seed: int=42
fsdp_activatio... |
from .fsdp import fsdp_config
from .training import train_config
|
from dataclasses import dataclass
from typing import ClassVar
@dataclass
class train_config:
model_name: str="t5-base"
run_validation: bool=True
batch_size_training: int=4
num_workers_dataloader: int=2
lr: float=0.002
weight_decay: float=0.0
gamma: float= 0.85
use_fp16: bool=False
... |
import torch
from torch.utils.data import Dataset
import fsspec
from dataclasses import dataclass
"""
Adapted from https://github.com/karpathy/minGPT/blob/master/projects/chargpt/chargpt.py
"""
@dataclass
class DataConfig:
path: str = None
block_size: int = None
train_split: float = None
truncate: flo... |
"""
Full definition of a GPT Language Model, all of it in this single file.
Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
from dataclasses import dataclass
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
@dataclass
class GPTConfig:
model_type... |
"""
Simple training loop; Boilerplate that could apply to any arbitrary neural network,
so nothing in this file really has anything to do with GPT specifically.
"""
from dataclasses import dataclass, asdict
from collections import OrderedDict
from typing import Optional, Any, Dict
import os
import torch
from torch.ut... |
import os
import torch
from torch.utils.data import random_split
from torch.distributed import init_process_group, destroy_process_group
from model import GPT, GPTConfig, OptimizerConfig, create_optimizer
from trainer import Trainer, TrainerConfig
from char_dataset import CharDataset, DataConfig
from omegaconf import D... |
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import in... |
import torch
from torch.utils.data import Dataset
class MyTrainDataset(Dataset):
def __init__(self, size):
self.size = size
self.data = [(torch.rand(20), torch.rand(1)) for _ in range(size)]
def __len__(self):
return self.size
def __getitem__(self, index):
return self.... |
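A sketch of how the surrounding multi-GPU scripts consume this dataset: a DataLoader driven by a DistributedSampler so each rank sees a disjoint shard (batch size is illustrative, and the sampler assumes an initialized process group).
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
def prepare_dataloader(dataset, batch_size: int = 32) -> DataLoader:
    # e.g. dataset = MyTrainDataset(2048), as in the scripts above
    return DataLoader(
        dataset,
        batch_size=batch_size,
        pin_memory=True,
        shuffle=False,                      # shuffling is delegated to the sampler
        sampler=DistributedSampler(dataset),
    )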
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
class Trainer:
def __init__(
self,
model: torch.nn.Module,
train_data: DataLoader,
optimizer: torch.optim.Optimizer,
gpu_id: int,
save_... |
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import in... |
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import in... |
import os
import threading
import time
from functools import wraps
import torch
import torch.nn as nn
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch... |
import random
import torch
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.nn import RemoteModule
from torch.distributed.optim import DistributedOptimizer
from t... |
import argparse
import gym
import os
import threading
import time
import torch
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote
from torch.distribu... |