| python_code | repo_name | file_path |
|---|---|---|
import torch
from contextlib import contextmanager
from functools import partial
from torch.cuda.amp import autocast
try:
    from apex import amp
    APEX_AVAILABLE = True
except ImportError:
    APEX_AVAILABLE = False
@contextmanager
def null_context():
    yield
def linear_attention_normalization(q, k, causal=False):
i... | fly-master | src/models/attention/scatterbrain_utils.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/full_attention.py
class FullAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
... | fly-master | src/models/attention/full_attention.py |
# Adapted from https://github.com/giannisdaras/smyrf/blob/master/smyrf/torch/attn.py
import math
import torch
import torch.nn as nn
from einops import rearrange, repeat
from src.utils.padding import pad_to_multiple
from src.ops.permutation import invert_permutation
from src.models.attention.hash_utils import XBOXPLUS... | fly-master | src/models/attention/smyrf_attention.py |
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/feature_maps/fourier_features.py
import math
import torch
from einops import rearrange, repeat
from fast_transformers.feature_maps.base import FeatureMap
from src.models.attention.projection_utils import gaussian_orthogonal_rando... | fly-master | src/models/attention/performer_feature_map.py |
import torch
from einops import rearrange, repeat
# Should do the same thing as https://github.com/openai/triton/blob/8bedcce9befbbe95d8fe0a082718edc4050e2831/python/triton/testing.py#L22
# but faster.
def sparsify_tensor(x, mask):
    """
    Arguments:
        x: (..., n_head, T, S)
        mask: (n_head, T // blo... | fly-master | src/models/attention/blocksparse_utils.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from fast_transformers.local_product import local_dot_product, local_weighted_average
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.feature_maps_sb import SBPerformerFeatures
from src.models.attention.... | fly-master | src/models/attention/sblocal_attention.py |
# Adapted from https://github.com/giannisdaras/smyrf/blob/master/smyrf/torch/utils.py
''' Utility functions for smyrf '''
import torch
import torch.nn.functional as F
from collections import defaultdict, Counter
import numpy as np
from tqdm import tqdm
import random
def random_flip(x):
    flips = torch.ceil((torch.r... | bazaar-master' is wrong; keep original trailer
# Adapted from https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/softmax.py
import triton.language as tl
import triton
import torch
def next_power_of_2(n):
    # Round n up to the next power of two by smearing the top set bit of n - 1
    n -= 1
    n |= n >> 1
    n |= n >> 2
    n |= n >> 4
    n |= n >> 8
    n |= n >> 16
    n += 1
    return n
def num_warps(n):
... | fly-master | src/models/attention/blocksparse_logsumexp.py |
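The `next_power_of_2` helper in the row above is visible in full; a quick worked check of the bit-smearing trick (a standalone sketch, repeating the definition so it runs on its own):

```python
def next_power_of_2(n):
    # Smear the top set bit of n - 1 rightwards, then add 1
    n -= 1
    n |= n >> 1
    n |= n >> 2
    n |= n >> 4
    n |= n >> 8
    n |= n >> 16
    return n + 1

assert next_power_of_2(1) == 1
assert next_power_of_2(5) == 8
assert next_power_of_2(64) == 64
assert next_power_of_2(65) == 128
```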
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/feature_maps/fourier_features.py
import math
import torch
from einops import rearrange
from src.models.attention.projection_utils import gaussian_orthogonal_random_matrix
# from fast_transformers.feature_maps.base import Feature... | fly-master | src/models/attention/feature_maps_sb.py |
# Adapted from https://github.com/lucidrains/performer-pytorch/blob/main/performer_pytorch/performer_pytorch.py
import math
import torch
from torch import nn
from einops import rearrange
from functools import partial
from src.models.attention.projection_utils import gaussian_orthogonal_random_matrix
from src.models.a... | fly-master | src/models/attention/performer_attention.py |
import torch
import torch.nn as nn
import hydra
from einops import rearrange
class CombinationAttention(nn.Module):
    """
    Arguments
    ---------
        softmax_temp: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
... | fly-master | src/models/attention/combination_attention.py |
# Adapted from https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reformer_pytorch.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import wraps
from einops import rearrange, repeat
from src.utils.padding import pad_to_multiple
from src.ops.perm... | fly-master | src/models/attention/reformer_attention.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed as dist
from einops import rearrange
import hydra
from triton.ops.blocksparse import softmax
from deepspeed.ops.sparse_attention import FixedSparsityConfig
from fast_transformers.local_product import local_... | fly-master | src/models/attention/sbblocksparse_attention.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed as dist
from einops import rearrange
import hydra
from deepspeed.ops.sparse_attention import SparsityConfig
# from triton.ops.blocksparse import matmul, softmax
from triton.ops.blocksparse import softmax
fro... | fly-master | src/models/attention/blocksparse_attention.py |
import torch
from einops import rearrange
def batched_index_select(values: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
    """
    Params:
        values: (1 or n_hashes, batch, seqlen, dim)
        indices: (n_hashes, batch, seqlen)
    Return:
        (n_hashes, batch, seqlen, dim)
    """
    last_dim = ... | fly-master | src/models/attention/batching_utils.py |
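The body of `batched_index_select` is truncated in the row above; a minimal gather-based sketch that matches the documented shapes (an assumption, not necessarily the repo's actual implementation):

```python
import torch

def batched_index_select_sketch(values, indices):
    # values: (1 or n_hashes, batch, seqlen, dim); indices: (n_hashes, batch, seqlen)
    n_hashes, batch, seqlen = indices.shape
    dim = values.shape[-1]
    # Broadcast values over the n_hashes axis if it has size 1
    values = values.expand(n_hashes, batch, -1, dim)
    # Gather along seqlen, expanding indices over the feature dimension
    idx = indices.unsqueeze(-1).expand(n_hashes, batch, seqlen, dim)
    return values.gather(2, idx)

out = batched_index_select_sketch(torch.randn(1, 2, 8, 4), torch.randint(8, (3, 2, 8)))
assert out.shape == (3, 2, 8, 4)
```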
# Adapted from https://github.com/giannisdaras/smyrf/blob/master/smyrf/torch/attn.py
import math
import torch
import torch.nn as nn
from einops import rearrange, repeat
from src.utils.padding import pad_to_multiple
from src.ops.permutation import invert_permutation
from src.models.attention.hash_utils import XBOXPLUS... | fly-master | src/models/attention/sbsmyrf_attention.py |
# Adapted from https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/softmax.py
import triton.language as tl
import triton
import torch
from src.models.attention.blocksparse_utils import sparsify_broadcast_tensor
def next_power_of_2(n):
    n -= 1
    n |= n >> 1
    n |= n >> 2
    n |= n >> 4
... | fly-master | src/models/attention/blocksparse_sum.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from fast_transformers.local_product import local_dot_product, local_weighted_average
from src.models.modules.masking import FullMask, LengthMask
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attent... | fly-master | src/models/attention/local_attention.py |
"""Implement linear attention."""
import torch
import torch.nn as nn
import hydra
from einops import rearrange
from fast_transformers.feature_maps import elu_feature_map
from src.models.modules.masking import TriangularCausalMask
from src.models.attention.performer_utils import causal_linear_attention, linear_atte... | fly-master | src/models/attention/linear_attention.py |
import math
import torch
from einops import rearrange
def gaussian_orthogonal_random_matrix(nrows, ncols, scaling=0, device=None, dtype=None):
    factory_kwargs = {'device': device, 'dtype': dtype}
    nblocks = int(math.ceil(nrows / ncols))
    # TD [2021-10-28]: Sometimes QR fails on CUDA
    unstructured_blocks... | fly-master | src/models/attention/projection_utils.py |
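The row above cuts off right after the QR comment. The standard construction this follows (e.g. in Performer) is to orthogonalize square Gaussian blocks with QR and stack their rows; a sketch under that assumption, ignoring the `scaling` and `factory_kwargs` details:

```python
import math
import torch

def gaussian_orthogonal_random_matrix_sketch(nrows, ncols):
    nblocks = int(math.ceil(nrows / ncols))
    blocks = []
    for _ in range(nblocks):
        # QR of a square Gaussian matrix yields orthonormal rows/columns;
        # the source comment notes QR can sometimes fail on CUDA.
        q, _ = torch.linalg.qr(torch.randn(ncols, ncols))
        blocks.append(q.t())
    return torch.cat(blocks, dim=0)[:nrows]
```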
import torch.nn.functional as F
from src.models.modules.masking import FullMask, LengthMask
def pad_mask(mask, pad_length, left=True, value=True):
    assert value in [True, False]
    assert isinstance(mask, (FullMask, LengthMask))
    if isinstance(mask, FullMask):
        pad = (pad_length, 0) if left else (0, pa... | fly-master | src/models/attention/mask_utils.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/exact_topk_attention.py
class ExactTopKAttention(nn.Module):
    """Implement the oracle top-k softmax attention.
    Arguments
    ---------
... | fly-master | src/models/attention/topk_attention.py |
import math
import torch
from torch.nn import functional as F
from einops import rearrange
def block_butterfly_multiply(twiddle, input, increasing_stride=True,
                             output_size=None):
    """
    twiddle: (nstacks, nblocks, log_n, n // 2, 2, 2, block_size, block_size)
    input: (batch_size,... | fly-master | src/models/layers/block_butterfly_multiply.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from src.models.layers.structured_linear import StructuredLinear
from src.ops.blockdiag_multiply import blockdiag_multiply
class BlockdiagLinear(StructuredLinear):
def __init__(self, *args, nblocks=4, shuffle=False, **kwargs):
... | fly-master | src/models/layers/blockdiag_linear.py |
import math
import numpy as np
import torch
from torch.nn import functional as F
from einops import rearrange
def blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2):
    """
    This implementation is slow but more likely to be correct.
    There are 3 implementations, which should all yield the... | fly-master | src/models/layers/blockdiag_butterfly_multiply.py |
import math
import torch
import torch.nn as nn
from torch.nn import init
from einops import rearrange
from src.models.layers.structured_linear import StructuredLinear
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.utils.utils import get_logger
logger = get_logger()
... | fly-master | src/models/layers/monarch_linear.py |
import torch
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's... | fly-master | src/models/layers/weight_init_helper.py |
fly-master | src/models/layers/__init__.py | |
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from einops import rearrange, repeat
import hydra
from src.utils.utils import get_logger
logger = get_logger()
from src.utils.padding import pad_to_multiple
try:
from src.model... | fly-master | src/models/layers/blocksparse_linear.py |
""" MLP module w/ dropout and configurable activation layer
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.layers.fastlinear import FastLinear, ButterflyLinear, RandomLinear, SLLinear, \
SLXLinear, TopkLinear, TopkLrLinear, B... | fly-master | src/models/layers/mlp.py |
# From https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_1d.py
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.nn.utils.parametrize as parametrize
# class RealToComplex(nn.Module):
# def forward(self, x):
# return torch.view_as_complex(x)
# [2... | fly-master | src/models/layers/spectral_conv.py |
import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import Linear, init
class MaskLinear(nn.Module):
    r"""
    """
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor
    def __... | fly-master | src/models/layers/maskedlinear.py |
from typing import Union
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import Linear, init
from einops import rearrange
import hydra
from src.ops.low_rank import low_rank_project
from src.ops.blockdiag_bu... | fly-master | src/models/layers/fastlinear.py |
""" Image to Patch Embedding using Conv2d
A convolution based approach to patchifying a 2D image w/ embedding projection.
Based on the impl in https://github.com/google-research/vision_transformer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from itertools import repeat
import colle... | fly-master | src/models/layers/patch_embed.py |
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class StructuredLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, device=None, dtype=None):
"""Subclasses should call reset_parameters
... | fly-master | src/models/layers/structured_linear.py |
# Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/components/positional_embedding/rotary.py
# We split the input differently ((d 2) -> d 2 instead of (2 d) -> d 2), following the original
# paper's implementation. This should not matter.
# Copyright (c) Facebook, Inc. and its affiliates. A... | fly-master | src/models/layers/rotary.py |
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
T2T-ViT
"""
import math
from functools import partial
import torch
import torch.nn as nn
from einops import rearran... | fly-master | src/models/vit/t2t_vit.py |
"""
The original Vision Transformer (ViT) from timm, copyright belongs to / Copyright 2020 Ross Wightman
"""
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import ... | fly-master | src/models/vit/vit.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_normal_, xavier_uniform_
from torch.nn.init import constant_
from torch.nn.parameter import Parameter
from torch import Tensor
from typing import Tuple, List, Optional
from einops import rearrange
from src... | fly-master | src/models/modules/multihead_attention.py |
""" Standalone version of Structured (Sequence) State Space (S4) model. """
import logging
from functools import partial
import math
import numpy as np
from scipy import special as ss
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils as U
from pytorch_lightning.utilities import ... | fly-master | src/models/modules/s4.py |
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
Take the standard Transformer as T2T Transformer
"""
import torch.nn as nn
from torchvision.ops import StochasticDep... | fly-master | src/models/modules/token_transformer.py |
import math
import torch
import torch.nn as nn
import hydra
from einops import reduce, rearrange
from src.utils.tuples import to_2tuple
def pooling(x, pooling_mode='CLS', key_padding_mask=None, batch_first=True):
    if pooling_mode not in ['MEAN', 'SUM', 'CLS', 'FLATTEN']:
        raise NotImplementedError(f'poo... | fly-master | src/models/modules/seq_common.py |
from torch import nn
class SimpleDenseNet(nn.Module):
    def __init__(self, hparams: dict):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(hparams["input_size"], hparams["lin1_size"]),
            nn.BatchNorm1d(hparams["lin1_size"]),
            nn.ReLU(),
            nn.Linea... | fly-master | src/models/modules/simple_dense_net.py |
fly-master | src/models/modules/__init__.py | |
# Copied from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/masking.py
# so that users can run most of the code without having to compile pytorch-fast-transformers.
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos... | fly-master | src/models/modules/masking.py |
# Adapted from https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
T2T-ViT
"""
import math
impor... | fly-master | src/models/modules/t2t.py |
"""
Take Performer as T2T Transformer
"""
import math
import torch
import torch.nn as nn
class Token_performer(nn.Module):
    def __init__(self, dim, in_dim, head_cnt=1, kernel_ratio=0.5, dp1=0.1, dp2=0.1):
        super().__init__()
        self.emb = in_dim * head_cnt  # we use head_cnt=1, so this is not needed here
        sel... | fly-master | src/models/modules/token_performer.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# and https://github.com/yitu-opensource/T2T-ViT/blob/main/models/transformer_block.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import hydra
from torch... | fly-master | src/models/modules/vision_common.py |
import torch
from einops import rearrange
def low_rank_project(M, rank):
    """Supports batches of matrices as well.
    """
    U, S, Vt = torch.linalg.svd(M)
    S_sqrt = S[..., :rank].sqrt()
    U = U[..., :rank] * rearrange(S_sqrt, '... rank -> ... 1 rank')
    Vt = rearrange(S_sqrt, '... rank -> ... rank 1') *... | fly-master | src/ops/low_rank.py |
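`low_rank_project` above is nearly complete: it keeps the top-`rank` SVD components and splits `sqrt(S)` between the two factors. A usage sketch, assuming it returns the factor pair `(U, Vt)` with `U @ Vt ≈ M`:

```python
import torch

M = torch.randn(8, 6)
U, Vt = low_rank_project(M, rank=6)
# At full rank the product should reconstruct M (up to float error)
assert torch.allclose(U @ Vt, M, atol=1e-4)
```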
# Adapt from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/fmha.py
import torch
import torch.nn as nn
# import fmhalib
# import fmhalibmine as fmhalib
import fmhalibtd as fmhalib
from einops import rearrange
def _fmha_forward(qkv, cu_seqlens, p_dropout, ... | fly-master | src/ops/bert_fmha.py |
import math
import torch
from einops import rearrange
def butterfly_factor_to_matrix(twiddle: torch.Tensor, factor_index: int) -> torch.Tensor:
    """
    Let b be the base (most commonly 2).
    Parameters:
        twiddle: (n // b, b, b)
        factor_index: an int from 0 to log_b(n) - 1
    """
    n_div_b, b, ... | fly-master | src/ops/butterfly_factor.py |
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indice... | fly-master | src/ops/bert_padding.py |
import numpy as np
import torch
from torch.nn import functional as F
from einops import rearrange
from src.ops.low_rank import low_rank_project
def blockdiag_weight_to_dense_weight(weight):
    """
    Arguments:
        weight: (nblocks, out / nblocks, in / nblocks)
    Return:
        dense_weight: (out, in)
    ... | fly-master | src/ops/blockdiag_multiply.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
# from src.ops.low_rank import low_rank_project
# Copied here so it's more self-contained
def low_rank_project(M, rank):
"""Supports batches of mat... | fly-master | src/ops/blockdiag_butterfly_projection.py |
import torch
@torch.jit.script
def jit_dropout_add(x, residual, prob):
    # type: (Tensor, Tensor, float) -> Tensor
    return torch.nn.functional.dropout(x, p=prob, training=True) + residual
def fused_dropout_add(x, residual, prob, is_training):
    # type: (Tensor, Tensor, float, bool) -> Tensor
    if is_train... | fly-master | src/ops/fused_dropout_add.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
# On the backward pass, we don't use the fused kernel from cublasLt since that's a bit slower.
# Instead we use the regular backward from F.linear.
# We also make it work with pytorch amp.
# TD [2022-02-27] The fused backward is a... | fly-master | src/ops/fused_dense.py |
import math
import torch
def bitreversal_permutation(n, device=None, dtype=None):
    """Return the bit reversal permutation used in FFT.
    By default, the permutation is stored in a numpy array.
    Parameter:
        n: integer, must be a power of 2.
    Return:
        perm: bit reversal permutation, pytorch tenso... | fly-master | src/ops/permutation.py |
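The docstring above fully specifies the behavior of `bitreversal_permutation`; an unoptimized reference sketch (hypothetical, not the repo's implementation):

```python
import torch

def bitreversal_permutation_sketch(n):
    log_n = n.bit_length() - 1
    assert 1 << log_n == n, 'n must be a power of 2'
    perm = []
    for i in range(n):
        # Reverse the log_n-bit binary representation of i
        r = 0
        for _ in range(log_n):
            r = (r << 1) | (i & 1)
            i >>= 1
        perm.append(r)
    return torch.tensor(perm)

assert bitreversal_permutation_sketch(8).tolist() == [0, 4, 2, 6, 1, 5, 3, 7]
```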
import torch
from einops import rearrange
from src.ops.low_rank import low_rank_project
def blockdiag_butterfly_multiply_einsum_simple(x, w1_bfly, w2_bfly):
    """
    Arguments:
        x: (batch, n)
        w1_bfly: (k, j, i), where k = n / i
        w2_bfly: (j, l, k)
    Outputs:
        out: (batch, m), where... | fly-master | src/ops/blockdiag_butterfly_einsum.py |
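The shapes in the docstring above pin down the computation: a block-diagonal multiply by `w1_bfly`, an implicit permutation via the einsum indices, then a second block-diagonal multiply by `w2_bfly`. A sketch consistent with those shapes (the final flattening order is a guess):

```python
import torch
from einops import rearrange

def blockdiag_butterfly_einsum_sketch(x, w1_bfly, w2_bfly):
    k, j, i = w1_bfly.shape
    j2, l, k2 = w2_bfly.shape
    assert (j2, k2) == (j, k)
    x = rearrange(x, 'b (k i) -> b k i', k=k)            # split n into k blocks of size i
    out1 = torch.einsum('bki,kji->bkj', x, w1_bfly)      # first block-diagonal multiply
    out2 = torch.einsum('bkj,jlk->bjl', out1, w2_bfly)   # second multiply over the k axis
    return rearrange(out2, 'b j l -> b (j l)')           # m = l * j
```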
import torch
from softmaxlib import additive_masked_softmax_dropout_forward
from softmaxlib import masked_scale_softmax_backward_recompute
from src.ops.triton.softmax_dropout import softmax_dropout
class _fused_softmax_dropout(torch.autograd.Function):
@staticmethod
def forward(ctx, x, p, mask, return_drop... | fly-master | src/ops/fused_softmax_dropout.py |
import torch
from einops import rearrange, repeat
def sparse_project(M, density):
    """Return a sparse mask of the largest entries of M in magnitude.
    """
    nparams = int(density * M.numel())
    # Implementation 1
    # sorted_idx = torch.argsort(M.abs().flatten(), descending=True)
    # threshold = M.abs()... | fly-master | src/ops/sparse.py |
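`sparse_project`'s first, commented-out implementation sorts entries by magnitude; an equivalent `topk`-based sketch of the documented behavior:

```python
import torch

def sparse_project_sketch(M, density):
    nparams = int(density * M.numel())
    mask = torch.zeros(M.numel(), dtype=torch.bool, device=M.device)
    _, idx = torch.topk(M.abs().flatten(), nparams)
    mask[idx] = True
    return mask.view_as(M)

mask = sparse_project_sketch(torch.randn(4, 4), density=0.25)
assert mask.sum().item() == 4
```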
# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
import math
import torch
from torch import nn
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is tanh approximation... | fly-master | src/ops/gelu_activation.py |
from typing import Optional
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
from einops import rearrange, repeat
import triton
import triton.language as tl
from src.ops.triton.k_softmax import _softmax, _softmax_backward
from src.op... | fly-master | src/ops/triton/softmax_dropout.py |
# Copied from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import tr... | fly-master | src/ops/triton/k_softmax_dropout.py |
# Copied from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum... | fly-master | src/ops/triton/softmax.py |
# Copied from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import tr... | fly-master | src/ops/triton/k_softmax.py |
'''
File: generate_mapping.py
Description: Maps each concept (CUI) in UMLS to a structured entity (QID) in WikiData.
'''
from tqdm import tqdm
from pathlib import Path
import pandas as pd
import sys
import logging
from importlib import reload
import json
import os
import argparse
from bootleg.end2end.extract_mentions ... | medical-ned-integration-main | generate_mapping.py |
'''
File: utils.py
Description: Helper functions for generating mappings
'''
import gzip
from tqdm import tqdm
import pandas as pd
import os
import json
from rich import print
VALID_VOCABULARIES = ['CPT', 'FMA', 'GO', 'HGNC', 'HPO', 'ICD10', \
'ICD10CM', 'ICD9CM', 'MDR', 'MSH', 'MTH', 'NCBI', \... | medical-ned-integration-main | utils.py |
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.1"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decod... | bazaar-master | condor/shared/html2text.py |
#!/usr/bin/env python
import botocore.session
import errno
import getopt
import os
import pprint
import shutil
import socket
import subprocess
import sys
import time
import urlparse
import urltools
# read env_local.sh
def source_env_local():
command = ['bash', '-c', 'source env_local.sh && env']
proc = subpro... | bazaar-master | distribute/ec2-client.py |
#!/usr/bin/env python
from azure import *
from azure.servicemanagement import *
import errno
import getopt
import os
import shutil
import subprocess
import sys
import time
# read env_local.sh
def source_env_local():
command = ['bash', '-c', 'source env_local.sh && env']
proc = subprocess.Popen(command, stdout... | bazaar-master | distribute/azure-client.py |
from fabric.api import *
from fabric.tasks import execute
import os
import re
def get_platform():
    with hide('everything'):
        return run("uname -s")
def is_installed(cmd):
    with settings(warn_only=True):
        with hide('everything'):
            result = run('command -v ' + cmd)
    return resu... | bazaar-master | distribute/fabfile.py |
#!/usr/bin/env python
from pyhocon import ConfigFactory
import json
import psycopg2
import psycopg2.extras
import sys
conf = ConfigFactory.parse_file('../view.conf')
conf_annotations = conf.get_list('view.annotations')
def write_annotations():
# write extractions to json file
dbconf = conf.get('view.db.defa... | bazaar-master | view/util/fetch-annotations.py |
#!/usr/bin/env python
from elasticsearch import Elasticsearch
import json
INPUT = "../data/sentences.json"
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'dd'
TYPE_NAME = 'docs'
N = 1000
es = Elasticsearch(hosts = [ES_HOST])
es.delete_by_query(index = INDEX_NAME, body = {
"query": {
"mat... | bazaar-master | view/util/index_docs.py |
#!/usr/bin/env python
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'view'
TYPE_ANNOTATORS_NAME = 'annotators'
TYPE_ANNOTATIONS_NAME = 'annotations'
N = 1000
from pyhocon import ConfigFactory
from elasticsearch import Elasticsearch
import json
import sys
conf = ConfigFactory.parse_file('../view.conf')... | bazaar-master | view/util/refresh-annotations.py |
#!/usr/bin/env python
# Author: Zifei Shan (zifeishan@gmail.com)
''' This file constructs a sentence table from ann.* files generated by the Pipe project.
Example usage:
    python generate_sentence_table.py DIRECTORY/OF/ANN/ > output_sentences.tsv
The generated sentence table follows the format below:
CREATE T... | bazaar-master | view/util/generate_sentence_table.py |
#! /usr/bin/env python
# Legacy support for sentences table in DeepDive.
# The script reads the table from the database and stores it in the new column format.
from pyhocon import ConfigFactory
import json
import psycopg2
import psycopg2.extras
import sys
import pipe
conf = ConfigFactory.parse_file('../view.conf')
... | bazaar-master | view/util/fetch-sentences-table.py |
#!/usr/bin/env python
from elasticsearch import Elasticsearch
import json
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'dd'
TYPE_NAME = 'extractors'
es = Elasticsearch(hosts = [ES_HOST])
es.delete_by_query(index = INDEX_NAME, doc_type = TYPE_NAME, body = {
"query": {
"match_all": {}
... | bazaar-master | view/util/index_extrlist.py |
#!/usr/bin/env python
from elasticsearch import Elasticsearch
import json
EXTRACTOR='genepheno'
INPUT='../data/genepheno_rel.json'
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'dd'
TYPE_NAME = 'docs'
N = 1000
es = Elasticsearch(hosts = [ES_HOST])
with open(INPUT, 'r') as f:
bulk_data = []
f... | bazaar-master | view/util/index_extr.py |
#! /usr/bin/env python
from os import listdir
from os.path import isfile, join
import json
# column format reader
def col_open(dir):
    return ColumnReaderAsSingleObj(dir)
def col_open_arr(dir):
    return ColumnReader(dir)
class ColumnReader(object):
    '''Reads Pipe's column format'''
    def __init__(self, dir):
... | bazaar-master | view/util/pipe.py |
#!/usr/bin/env python
import pipe
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'view'
TYPE_NAME = 'docs'
N = 1000
from pyhocon import ConfigFactory
from elasticsearch import Elasticsearch
import json
import sys
conf = ConfigFactory.parse_file('../view.conf')
docs_conf = conf.get('view.docs')
es = ... | bazaar-master | view/util/refresh-documents.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import json
import time
try:
from eval_server_common import connect_to_redis
except ImportError:
print("HINT:... | codraw-models-master | eval_run_bots.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.func... | codraw-models-master | baseline4_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
An event-based view of the CoDraw dataset
"""
#%%
import numpy as np
from pathlib import Path
import json
from enum i... | codraw-models-master | codraw_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Multi-headed attention implementation
"""
#%%
import numpy as np
import torch
import torch.cuda
import torch.nn as n... | codraw-models-master | attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import heapq
import torch
import torch.cuda
import torch.nn as nn
import ... | codraw-models-master | baseline2_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from p... | codraw-models-master | baseline4_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__mai... | codraw-models-master | baseline1_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def scene_similarity_orig(pred, target):
"""
DEPRECATED: use scene_similarity instead!
Thi... | codraw-models-master | abs_metric.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
try:
from IPython.display import display
except ImportError:
assert not INTERACTIVE
def display(*args, **kwargs... | codraw-models-master | episode.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import redis
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_PASSWORD = 'YOUR PASSWORD HERE'
REDIS_CONNECTION = None
de... | codraw-models-master | example.eval_server_common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['cpu', 'cuda_if_available', 'logsumexp', 'torch_load']
import torch
# %%
cpu = torch.device('cpu')
if torch.... | codraw-models-master | nkfb_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.func... | codraw-models-master | baseline3_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pathlib import Path
import editdistance
from collections import Counter
import torch
import torch.... | codraw-models-master | datagen.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
def load_models(*partitions):
    if not partitions:
        partitions = (1, 2, 3, 4)
    models = {}
    if 1 in p... | codraw-models-master | saved_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Provides the Packer class, which is useful for managing a hierarchy where each
batch element has a variable number of c... | codraw-models-master | packer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
impor... | codraw-models-master | model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import json
import numpy a... | codraw-models-master | eval_transcripts.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from p... | codraw-models-master | baseline2_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from IPython.display import SVG, display
from PIL import Image
from binascii import b2a_base64
PN... | codraw-models-master | abs_render.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__mai... | codraw-models-master | baseline2_train.py |