Dataset Viewer
Auto-converted to Parquet

One column: python_code (string, lengths 0 to 229k characters)
## @package process
# Module doxygen.process
# Script to insert preamble for doxygen and regen API docs
import glob, os, shutil

# Module caffe2...caffe2.python.control_test
def insert(originalfile,first_line,description):
    with open(originalfile,'r') as f:
        f1 = f.readline()
        if(f1.find(first_line)<0...

## @package publish
# Module doxygen.publish
import os, shutil

if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-c"):
    print("Looks like you ran this before, so we need to cleanup those old files...")
    shutil.rmtree("/Users/aaronmarkham/caffe2/doxygen-c")
if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-...

import os
import torch
from torch.utils.ffi import create_extension

this_file = os.path.dirname(__file__)

sources = ['src/my_lib.c']
headers = ['src/my_lib.h']
defines = []
with_cuda = False

if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['src/my_lib_cuda.c']
    headers += ['src/my_l...

import torch
import torch.nn as nn
from torch.autograd import Variable

from modules.add import MyAddModule

class MyNetwork(nn.Module):
    def __init__(self):
        super(MyNetwork, self).__init__()
        self.add = MyAddModule()

    def forward(self, input1, input2):
        return self.add(input1, input2)

mod...

# functions/add.py
import torch
from torch.autograd import Function
from _ext import my_lib

class MyAddFunction(Function):
    def forward(self, input1, input2):
        output = input1.new()
        if not input1.is_cuda:
            my_lib.my_lib_add_forward(input1, input2, output)
        else:
            my_lib....

from torch.nn.modules.module import Module
from functions.add import MyAddFunction

class MyAddModule(Module):
    def forward(self, input1, input2):
        return MyAddFunction()(input1, input2)

import os
import torch
from torch.utils.ffi import create_extension

this_file = os.path.dirname(__file__)

sources = ['my_package/src/my_lib.c']
headers = ['my_package/src/my_lib.h']
defines = []
with_cuda = False

if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['my_package/src/my_lib_c...

import torch
import torch.nn as nn
from torch.autograd import Variable

from my_package.modules.add import MyAddModule

class MyNetwork(nn.Module):
    def __init__(self):
        super(MyNetwork, self).__init__()
        self.add = MyAddModule()

    def forward(self, input1, input2):
        return self.add(input1, in...

# functions/add.py
import torch
from torch.autograd import Function
from .._ext import my_lib

class MyAddFunction(Function):
    def forward(self, input1, input2):
        output = input1.new()
        if not input1.is_cuda:
            my_lib.my_lib_add_forward(input1, input2, output)
        else:
            my_li...

from torch.nn.modules.module import Module
from ..functions.add import MyAddFunction

class MyAddModule(Module):
    def forward(self, input1, input2):
        return MyAddFunction()(input1, input2)

import torch

torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")

def compute(x, y, z):
    x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
    return x.matmul(y) + torch.relu(z)

inputs = [torch.randn(4, 8), torch.randn(8, 5), torch.randn(8, 5)]
trace = torch.jit.trace(compute...

# Run `python setup.py build develop` before running this example!
import torch
torch.ops.load_library("warp_perspective.so")
print(torch.ops.my_ops.warp_perspective)

import torch
torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")
print(torch.ops.my_ops.warp_perspective(torch.randn(32, 32), torch.rand(3, 3)))

import torch
import torch.utils.cpp_extension

op_source = """
#include <opencv2/opencv.hpp>
#include <torch/script.h>

torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
  cv::Mat image_mat(/*rows=*/image.size(0),
                    /*cols=*/image.size(1),
                    /*type=*/CV_32FC1,...

import torch
import torch.utils.cpp_extension

torch.utils.cpp_extension.load(
    name="warp_perspective",
    sources=["example_app/warp_perspective/op.cpp"],
    extra_ldflags=["-lopencv_core", "-lopencv_imgproc"],
    is_python_module=False,
    verbose=True
)

print(torch.ops.my_ops.warp_perspective)

import torch

torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")

@torch.jit.script
def compute(x, y):
    if bool(x[0][0] == 42):
        z = 5
    else:
        z = 10
    x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
    return x.matmul(y) + z

print(compute.graph)
print(c...

from __future__ import division
from __future__ import print_function

import argparse
import math
import time

import torch

TIME_SCALES = {'s': 1, 'ms': 1000, 'us': 1000000}

parser = argparse.ArgumentParser()
parser.add_argument('example', choices=['py', 'cpp', 'cuda'])
parser.add_argument('-b', '--batch-size', type...

from __future__ import division
from __future__ import print_function

import argparse
import numpy as np
import torch

import python.lltm_baseline
import cpp.lltm

def check_equal(first, second, verbose):
    if verbose:
        print()
    for i, (x, y) in enumerate(zip(first, second)):
        x = x.cpu().detach()....

from __future__ import division
from __future__ import print_function

import argparse
import torch
from torch.autograd import gradcheck

parser = argparse.ArgumentParser()
parser.add_argument('example', choices=['py', 'cpp', 'cuda'])
parser.add_argument('-b', '--batch-size', type=int, default=3)
parser.add_argument('-...

import math

import torch
import torch.nn.functional as F

torch.manual_seed(42)

class LLTM(torch.nn.Module):
    def __init__(self, input_features, state_size):
        super(LLTM, self).__init__()
        self.input_features = input_features
        self.state_size = state_size
        # 3 * state_size for input gat...

import math

from torch import nn
from torch.autograd import Function
import torch
import torch.nn.functional as F

torch.manual_seed(42)

def d_sigmoid(z):
    s = torch.sigmoid(z)
    return (1 - s) * s

def d_tanh(z):
    t = torch.tanh(z)
    return 1 - (t * t)

def d_elu(z, alpha=1.0):
    e = z.exp()
    mask ...

from torch.utils.cpp_extension import load

lltm_cuda = load(
    'lltm_cuda', ['lltm_cuda.cpp', 'lltm_cuda_kernel.cu'], verbose=True)
help(lltm_cuda)

import math

from torch import nn
from torch.autograd import Function
import torch

import lltm_cuda

torch.manual_seed(42)

class LLTMFunction(Function):
    @staticmethod
    def forward(ctx, input, weights, bias, old_h, old_cell):
        outputs = lltm_cuda.forward(input, weights, bias, old_h, old_cell)
        new...

from torch.utils.cpp_extension import load

lltm_cpp = load(name="lltm_cpp", sources=["lltm.cpp"], verbose=True)
help(lltm_cpp)

import math

from torch import nn
from torch.autograd import Function
import torch

import lltm_cpp

torch.manual_seed(42)

class LLTMFunction(Function):
    @staticmethod
    def forward(ctx, input, weights, bias, old_h, old_cell):
        outputs = lltm_cpp.forward(input, weights, bias, old_h, old_cell)
        new_h...

from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__(...

###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model.
#
###############################################################################
import argparse

import torch

import data

parser = a...

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        super(RNNModel, self).__init__...

# coding: utf-8
import argparse
import time
import math
import os
import torch
import torch.nn as nn
import torch.onnx

import data
import model

parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM/GRU/Transformer Language Model')
parser.add_argument('--data', type=str, default='./data/wikitext-2'...

import os
from io import open
import torch

class Dictionary(object):
    def __init__(self):
        self.word2idx = {}
        self.idx2word = []

    def add_word(self, word):
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
        ...

import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision
from torchvision.transforms import Compose, ToTensor, Resize
from torch import optim
import numpy as np
from torch.hub import tqdm

class PatchExtractor(nn.Module):
    def __init__(self, patch_size=16):
        ...

import os
import time
import requests
import tarfile
import numpy as np
import argparse

import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam

################################
### GAT LAYER DEFINITION  ###
################################

class GraphAttentionLayer(nn.Modul...

from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init_...

import argparse

import torch
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import parallelize_module
from utils import cleanup, setup, ToyModel

try:
    from torch.distributed.tensor.parallel import (
        SequenceParallel
    )
    SP_A...

import argparse
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn

def setup(rank, world_size):
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"

    # initialize the process group
    dist.init_process_group("nccl", ran...

import argparse

import torch
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from utils import cleanup, setup, ToyModel

"""
This is the script to test Tensor Parallel(TP) on a toy model in a Megetr...

import argparse

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor.parallel import (
    PairwiseParallel,
    parallelize_module,
)
from tor...

import argparse
import os
import sys
import tempfile
from urllib.parse import urlparse

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim

from torch.nn.parallel import DistributedDataParallel as DDP

class ToyModel(nn.Module):
    def __init__(self):
        super(ToyModel...

import os
import tempfile
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim

from torch.nn.parallel import DistributedDataParallel as DDP

def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_POR...

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from transformers import AutoTokenizer, GPT2TokenizerFast
from transformers import T5Tokenizer, T5ForConditionalGeneration
import functools
from torch.optim.lr_scheduler import StepLR
import torch.nn...

import argparse
import glob
import os
import json
import time
import logging
import random
import re
from itertools import chain
from string import punctuation

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

from datasets import load_dataset, load_metric

from tra...

import os
import torch
import torch.distributed as dist
from datetime import datetime
import tqdm
from transformers import AutoTokenizer, GPT2TokenizerFast
from transformers import T5Tokenizer, T5ForConditionalGeneration

g_gigabyte = 1024**3

def setup():
    # initialize the process group
    dist.init_process_group(...

from .environment import bfloat_support
from .train_utils import setup, cleanup, get_date_of_run, format_metrics_to_gb, train, validation, setup_model

# Copyright (c) 2022 Meta Platforms, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the Apache-style license found in the
# LICENSE file in the root directory of this source tree.

# This is a simple check to confirm that your current server has full bfloat support -
# both GPU ...

import torch
import os
import torch.distributed as dist
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    checkpoint_wrapper,
    CheckpointImpl,
    apply_activation_checkpointing,
)
from transformers.models.t5.modeling_t5 import T5Block

from functools import partial

non_reentrant_wrappe...

import torch

from torch.distributed.fsdp import (
    # FullyShardedDataParallel as FSDP,
    # CPUOffload,
    MixedPrecision,
    # BackwardPrefetch,
    # ShardingStrategy,
)

# requires grad scaler in main loop
fpSixteen = MixedPrecision(
    param_dtype=torch.float16,
    # Gradient communication precision.
    r...

from .mixed_precision import *
from .wrapping import *
from .activation_checkpointing_functions import apply_fsdp_checkpointing

# holds various wrapping policies for fsdp
import torch.distributed as dist
import torch.nn as nn
import torch

from transformers.models.t5.modeling_t5 import T5Block

from torch.distributed.fsdp.fully_sharded_data_parallel import (
    FullyShardedDataParallel as FSDP,
    CPUOffload,
    BackwardPrefetch,
    Mixed...

from pathlib import Path
from datetime import datetime
import torch
import time

from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    StateDictType,
    FullStateDictConfig,  # general model non-sharded, non-flattened params
    LocalStateDictConfig,  # flattened params, usable only by FSDP
    ...

from .checkpoint_handler import (
    load_model_checkpoint,
    save_model_checkpoint,
    save_distributed_model_checkpoint,
    load_distributed_model_checkpoint,
    load_optimizer_checkpoint,
    save_optimizer_checkpoint,
    save_model_and_optimizer_sharded,
    load_model_sharded,
)

from dataclasses import dataclass, field
from typing import ClassVar
from torch.distributed.fsdp import ShardingStrategy
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

@dataclass
class fsdp_config:
    mixed_precision: bool=True
    use_fp16: bool=False
    seed: int=42
    fsdp_activatio...

from .fsdp import fsdp_config
from .training import train_config

from dataclasses import dataclass
from typing import ClassVar

@dataclass
class train_config:
    model_name: str="t5-base"
    run_validation: bool=True
    batch_size_training: int=4
    num_workers_dataloader: int=2
    lr: float=0.002
    weight_decay: float=0.0
    gamma: float=0.85
    use_fp16: bool=False
    ...

import torch
from torch.utils.data import Dataset
import fsspec
from dataclasses import dataclass

"""
Adapted from https://github.com/karpathy/minGPT/blob/master/projects/chargpt/chargpt.py
"""

@dataclass
class DataConfig:
    path: str = None
    block_size: int = None
    train_split: float = None
    truncate: flo...

""" Full definition of a GPT Language Model, all of it in this single file. Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ from dataclasses import dataclass import math import torch import torch.nn as nn from torch.nn import functional as F @dataclass class GPTConfig: model_type...
""" Simple training loop; Boilerplate that could apply to any arbitrary neural network, so nothing in this file really has anything to do with GPT specifically. """ from dataclasses import dataclass, asdict from collections import OrderedDict from typing import Optional, Any, Dict import os import torch from torch.ut...
import os

import torch
from torch.utils.data import random_split
from torch.distributed import init_process_group, destroy_process_group

from model import GPT, GPTConfig, OptimizerConfig, create_optimizer
from trainer import Trainer, TrainerConfig
from char_dataset import CharDataset, DataConfig
from omegaconf import D...

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset

import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import in...

import torch
from torch.utils.data import Dataset

class MyTrainDataset(Dataset):
    def __init__(self, size):
        self.size = size
        self.data = [(torch.rand(20), torch.rand(1)) for _ in range(size)]

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        return self....

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset

class Trainer:
    def __init__(
        self,
        model: torch.nn.Module,
        train_data: DataLoader,
        optimizer: torch.optim.Optimizer,
        gpu_id: int,
        save_...

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset

import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import in...

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset

import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import in...

import os
import threading
import time
from functools import wraps

import torch
import torch.nn as nn
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer

from torch...

import random

import torch
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.nn import RemoteModule
from torch.distributed.optim import DistributedOptimizer
from t...
