code stringlengths 101 5.91M |
|---|
def _rename_basic_resnet_weights(layer_keys):
layer_keys = [k.replace('_', '.') for k in layer_keys]
layer_keys = [k.replace('.w', '.weight') for k in layer_keys]
layer_keys = [k.replace('.bn', '_bn') for k in layer_keys]
layer_keys = [k.replace('.b', '.bias') for k in layer_keys]
layer_keys = [k.re... |
def test_get_info_by_date():
with TestClient(app) as client:
begin = '2021-09-29'
end = '2021-09-30'
response = client.get(f'/{PREFIX}/info_by_date?begin={begin}&end={end}')
assert (response.status_code == 200)
body = response.json()
assert (body.get('perFemales') >= ... |
class OutputComposite(Masker):
def __init__(self, masker, model):
self.masker = masker
self.model = model
masker_attributes = ['shape', 'invariants', 'clustering', 'data_transform', 'mask_shapes', 'feature_names', 'text_data', 'image_data']
for masker_attribute in masker_attributes:
... |
def create_kind_cluster() -> None:
try:
run_check_process('kind create cluster --config openwhisk/kind-cluster.yaml')
while True:
nodes = subprocess.run('kubectl get nodes'.split(), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
node_grep = subprocess.run('grep kind'.spli... |
class OverFeatTest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
(height, width) = (231, 231)
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
(logits, _) = overfeat.overfeat(inputs, num_classes)
... |
def _create_test_dataset(dataset, dataset_dir, transform, target_transform=None):
if (dataset == 'cifar10'):
test_dataset = datasets.CIFAR10(root=dataset_dir, train=False, download=True, transform=transform, target_transform=target_transform)
elif (dataset == 'cifar100'):
test_dataset = datasets... |
def multiple_replace(dic, text):
    """Replace every occurrence of each key of *dic* in *text* in one pass.

    Because all keys are combined into a single regex, replacement values
    are never re-scanned for further matches.

    Args:
        dic: Mapping of search string -> replacement string.
        text: The input string.

    Returns:
        *text* with all replacements applied.
    """
    # An empty mapping would compile to a pattern matching the empty string
    # everywhere and then raise KeyError in the callback — short-circuit it.
    if not dic:
        return text
    # Sort keys longest-first so that when one key is a prefix of another
    # (e.g. 'a' and 'ab') the longer key wins, instead of dict order.
    keys = sorted(dic, key=len, reverse=True)
    regex = re.compile('|'.join(re.escape(k) for k in keys))
    return regex.sub(lambda mo: dic[mo.group(0)], text)
class SSFetcher(threading.Thread):
def __init__(self, parent):
threading.Thread.__init__(self)
self.parent = parent
self.indexes = np.arange(parent.data_len)
def run(self):
diter = self.parent
self.parent.rng.shuffle(self.indexes)
offset = 0
while (not dit... |
def inceptionresnetv2(num_classes=1000, pretrained='imagenet'):
if pretrained:
settings = pretrained_settings['inceptionresnetv2'][pretrained]
assert (num_classes == settings['num_classes']), 'num_classes should be {}, but is {}'.format(settings['num_classes'], num_classes)
model = Inception... |
def simple_tests():
np.random.seed(222)
dataset = ReplayPool(observation_shape=(3, 2), action_dim=1, max_steps=6, concat_observations=True, concat_length=4)
for _ in range(10):
img = np.random.randint(0, 256, size=(3, 2))
action = np.random.randint(16)
reward = np.random.random()
... |
def elu(x, alpha=1.0):
    """Exponential linear unit with an optional negative-side scale.

    Computes ``tf.nn.elu(x)`` and, when ``alpha != 1``, scales the
    negative branch of the output by ``alpha``.
    """
    activated = tf.nn.elu(x)
    if alpha == 1:
        return activated
    # elu(x) is negative exactly where x <= 0, so scaling that branch by
    # alpha yields the alpha-parameterised ELU.
    return tf.where(x > 0, activated, alpha * activated)
def compile_partitioned_model(graph: Graph, model: Module, batch_dim: int, generate_explicit_del: bool=False, generate_activation_propagation: bool=True, output_file: Optional[str]=None):
re_assign = True
try:
ensure_inputs_are_used(graph, assert_same_stages=True)
ensure_no_unnecessary_tuple_sen... |
def main():
parser = argparse.ArgumentParser(description='OGBN-Products (Cluster-GCN)')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--log_steps', type=int, default=1)
parser.add_argument('--num_partitions', type=int, default=15000)
parser.add_argument('--num_workers', t... |
def create_command(name, **kwargs):
    """Instantiate the command registered under *name*.

    Looks up *name* in the module-level ``commands_dict`` registry, imports
    the module that hosts the command class, and constructs it with the
    registered summary plus any extra keyword arguments.
    """
    module_path, class_name, summary = commands_dict[name]
    command_class = getattr(importlib.import_module(module_path), class_name)
    return command_class(name=name, summary=summary, **kwargs)
def test__contextual_partition(expected, observed):
expected = list(expected[['start', 'end']].itertuples(index=False))
observed = list(observed[['start', 'end']].itertuples(index=False))
expected_parts = [1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1]
observed_parts = [0, 1, 0, 1, 0, 0, 0, 1,... |
class Fmodel(nn.Module):
def __init__(self):
super(Fmodel, self).__init__()
self.statefc = nn.Sequential(nn.Linear(2, 16), nn.ReLU(), nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 32))
self.actionfc = nn.Sequential(nn.Linear(2, 16), nn.ReLU(), nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 32))
... |
def vgg19(use_batch_norm: bool=True, layers: str='Baysian_Ma', state_dict: str=None):
model = VGG_Baysian_Ma(make_layers(cfg[layers], batch_norm=use_batch_norm))
if (state_dict is None):
if use_batch_norm:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn'], 'Model/model_pretrain... |
def single_wall_mobility_trans_times_force_source_target_pycuda(source, target, force, radius_source, radius_target, eta, *args, **kwargs):
number_of_sources = np.int32((source.size // 3))
number_of_targets = np.int32((target.size // 3))
(threads_per_block, num_blocks) = set_number_of_threads_and_blocks(num... |
class SpectralNetModel(nn.Module):
def __init__(self, architecture: dict, input_dim: int):
super(SpectralNetModel, self).__init__()
self.architecture = architecture
self.layers = nn.ModuleList()
self.input_dim = input_dim
current_dim = self.input_dim
for (i, layer) in... |
class CascadeMsgType(Enum):
    """Message types exchanged by the cascade protocol.

    Values are written as explicit integers matching exactly what the
    original ``auto()`` declarations produced (1 through 7, in order).
    """
    KEY = 1
    PARAMS = 2
    CHECKSUMS = 3
    SEND_FOR_BINARY = 4
    RECEIVE_FOR_BINARY = 5
    GENERATE_KEY = 6
    KEY_IS_VALID = 7
def tensor2depth(input_depth, imtype=np.int32):
if isinstance(input_depth, torch.Tensor):
depth_tensor = input_depth.data
else:
return input_depth
depth_numpy = depth_tensor[0].cpu().float().numpy()
depth_numpy += 1.0
depth_numpy /= 2.0
depth_numpy *= 65535.0
depth_numpy = de... |
def register_Ns3LteRlcSapProvider_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteRlcSapProvider const &', 'arg0')])
cls.add_method('TransmitPdcpPdu', 'void', [param('ns3::LteRlcSapProvider::TransmitPdcpPduParameters', 'params')], is_pure_virtual=True, is_virtual=True)... |
def plot_waterfall_all(SELECTED_DATASET):
model_correctness_pair = correctness_dfs[SELECTED_DATASET]
model_correctness_pair = {k: v for (k, v) in model_correctness_pair.items() if (k != 'clip_vit_l_14')}
accuraccies = {}
t_above_100 = [t for t in all_tsizes if (t > 100)]
sample_index = list(model_co... |
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, loss='softmax', fc_dims=None, dropout_p=None, **kwargs):
scale = 64
self.inplanes = scale
super(ResNet, self).__init__()
self.loss = loss
self.feature_dim = ((scale * 8) * block.expansion)
se... |
def get_line(node: FASTNode):
line = None
if ((node.item is not None) and hasattr(node.item, 'span')):
line = node.item.span
else:
tmp = node
while (tmp.parent is not None):
tmp = tmp.parent
if ((tmp.item is not None) and hasattr(tmp.item, 'span')):
... |
class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
    """Thin wrapper over TorchScript's ``SourceRangeFactory`` that carries
    the original source text and its location for error reporting."""

    def __init__(self, source, filename, file_lineno, leading_whitespace_len):
        # Forward all positional context straight through to the factory.
        super().__init__(source, filename, file_lineno, leading_whitespace_len)
class DataProvider():
VALID_SEED = 0
def name():
raise NotImplementedError
def data_shape(self):
raise NotImplementedError
def n_classes(self):
raise NotImplementedError
def save_path(self):
raise NotImplementedError
def data_url(self):
raise NotImplemente... |
class EstimatorWithSetOutput(_SetOutputMixin):
def fit(self, X, y=None):
self.n_features_in_ = X.shape[1]
return self
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return np.asarray([f'X{i}' for i in range(self.n_features_in_)]... |
def _seg_21():
return [(8239, '3', u' '), (8240, 'V'), (8243, 'M', u''), (8244, 'M', u''), (8245, 'V'), (8246, 'M', u''), (8247, 'M', u''), (8248, 'V'), (8252, '3', u'!!'), (8253, 'V'), (8254, '3', u' '), (8255, 'V'), (8263, '3', u'??'), (8264, '3', u'?!'), (8265, '3', u'!?'), (8266, 'V'), (8279, 'M', u''), (8280, ... |
def filter_answers(train_qa_pairs, val_qa_pairs, min_occurence):
occurence = {}
qa_pairs = train_qa_pairs.append(val_qa_pairs)
qa_pairs['answer'] = qa_pairs['answer'].apply((lambda x: str(x)))
for (id, row) in qa_pairs.iterrows():
gtruth = row['answer']
gtruth = ' '.join(gtruth.split())
... |
def maybe_download_and_extract_movie_data(data_dir, force_overwrite=False):
write_path = os.path.join(data_dir, 'ml-20m.zip')
zip_url = '
if (not os.path.isfile(write_path)):
os.makedirs(data_dir, exist_ok=True)
print('Zip not downloaded. Downloading now...')
save_zip_data(write_path... |
class HDDM_W(BaseDriftDetector):
class SampleInfo():
def __init__(self):
self.EWMA_estimator = (- 1.0)
self.independent_bounded_condition_sum = None
def __init__(self, drift_confidence=0.001, warning_confidence=0.005, lambda_option=0.05, two_side_option=True):
super().__i... |
_processor_variant(TOKENIZE, 'pythainlp')
class PyThaiNLPTokenizer(ProcessorVariant):
def __init__(self, config):
if (config['lang'] != 'th'):
raise Exception('PyThaiNLP tokenizer is only allowed in Thai pipeline.')
check_pythainlp()
from pythainlp.tokenize import sent_tokenize a... |
def get_nn_act_func(act, inplace=True, **kwargs):
if (act is None):
return nn.Identity()
if (act.lower() == 'relu'):
act_func = nn.ReLU(inplace=inplace)
elif (act.lower() == 'sigmoid'):
act_func = nn.Sigmoid()
elif (act.lower() == 'prelu'):
act_func = nn.PReLU(**kwargs)
... |
class TFIDF():
def __init__(self, map: t.Dict[(int, t.List[int])]):
self.__map = map
self.__o = Counter((feature for feature_list in self.__map.values() for feature in feature_list))
self.__maxi = max(self.__o.values())
self.__total_documents = len(self.__map)
self.__idfo = {... |
class Dataset():
def __init__(self, dataset, data_path='./data', normalize=False, random_state=50, **kwargs):
np.random.seed(random_state)
torch.manual_seed(random_state)
random.seed(random_state)
if (dataset in DATASETS):
data_dict = DATASETS[dataset](osp.join(data_path,... |
def main(opts):
device = 'cpu'
if (torch.cuda.is_available and (not opts.no_cuda)):
device = 'cuda'
opts.cuda = True
CUDA = (device == 'cuda')
random.seed(opts.seed)
np.random.seed(opts.seed)
torch.manual_seed(opts.seed)
if CUDA:
torch.cuda.manual_seed_all(opts.seed)
... |
def print_report(args: argparse.Namespace, reportfile: Union[(str, InstrumentationReport)]):
if isinstance(reportfile, str):
path = os.path.abspath(reportfile)
if (not os.path.isfile(path)):
print(path, 'does not exist, aborting.')
exit(1)
report = InstrumentationRepo... |
class LinearPredictiveCodingAnalysis(nn.Module):
def __init__(self, lpc_order, frame_length):
super(LinearPredictiveCodingAnalysis, self).__init__()
self.lpc = nn.Sequential(AutocorrelationAnalysis(lpc_order, frame_length), LevinsonDurbin(lpc_order))
def forward(self, x):
a = self.lpc(x)... |
class WarmUp(metaclass=DummyObject):
    # Import-time placeholder used when TensorFlow is not installed: any
    # attempt to instantiate it raises via requires_backends with an
    # installation hint instead of failing with an opaque ImportError.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def train_argparser():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--train_path', type=str, help='Path to train dataset')
arg_parser.add_argument('--valid_path', type=str, help='Path to validation dataset')
arg_parser.add_argument('--save_path', type=str, help='Path to directory wher... |
class Network(nn.Module):
def __init__(self, C, num_classes, layers, genotype, in_channels, drop_path_prob):
super(Network, self).__init__()
self._layers = layers
self.drop_path_prob = 0.0
stem_multiplier = 3
C_curr = (stem_multiplier * C)
self.stem = nn.Sequential(nn... |
def decode(s, strict=False, uts46=False, std3_rules=False):
if isinstance(s, (bytes, bytearray)):
s = s.decode('ascii')
if uts46:
s = uts46_remap(s, std3_rules, False)
trailing_dot = False
result = []
if (not strict):
labels = _unicode_dots_re.split(s)
else:
label... |
class AST_Assign(AST_Node):
def __init__(self, context, lhs, rhs, op):
AST_Node.__init__(self, context)
self.lhs = lhs
self.rhs = rhs
self.op = op
self.children = [self.lhs, self.rhs]
def get_children(self):
retval = [self.lhs, self.rhs]
return retval
... |
def _find_detector_id(detector_id_prefix, detectors_path) -> str:
available_detector_ids = get_available_detector_ids(detectors_path)
detector_ids = ([id for id in available_detector_ids if (id == detector_id_prefix)] or [id for id in available_detector_ids if id.startswith(detector_id_prefix)])
if (not det... |
class BertConfig(PretrainedConfig):
model_type = 'bert'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initia... |
def create_float_context(ctx):
    """Return a context on the same device as *ctx* with any type
    configuration stripped.

    The backend string may carry a type suffix (e.g. ``'cudnn:half'``);
    only the part before the colon is kept, yielding the default-precision
    (float) context.
    """
    backend_name = ctx.backend[0].split(':')[0]
    return get_extension_context(backend_name, device_id=ctx.device_id)
.torch
def test_validation_dataset(sequential_dataset, item_user_sequential_dataset):
    """The validation dataset exposes 4 entries and maps index 0 to
    query_id 0."""
    dataset = TorchSequentialValidationDataset(
        sequential_dataset,
        sequential_dataset,
        item_user_sequential_dataset,
        max_sequence_length=5,
    )
    assert len(dataset) == 4
    assert dataset[0].query_id == 0
def load_labels(label_file):
    """Read *label_file* via ``tf.gfile`` and return its lines, stripped of
    trailing whitespace, as a list of label strings."""
    lines = tf.gfile.GFile(label_file).readlines()
    return [line.rstrip() for line in lines]
def arr_to_toks(arr):
    """Wrap every element of *arr* in a ``Token`` (stringified, with zero
    scores) and return the resulting list."""
    return [Token(str(item), 0.0, 0.0) for item in arr]
def read_image(img_dir):
    """Load an image in grayscale, binarise it at threshold 127, and return
    the thresholded image together with its first contour.

    NOTE(review): the 3-tuple unpack of ``cv2.findContours`` assumes the
    OpenCV 3.x API; OpenCV 4.x returns only (contours, hierarchy) — confirm
    the pinned cv2 version before reuse.
    """
    gray = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE)
    _ret, binary = cv2.threshold(gray, 127, 255, 0)
    _, contours, _hierarchy = cv2.findContours(binary, 1, 2)
    return (binary, contours[0])
def parse_args():
    """Build and parse the command-line arguments for training an editor.

    Returns:
        argparse.Namespace with ``config`` (path to the train config file)
        and ``shape`` (input image size, default ``[250, 250]``).
    """
    parser = argparse.ArgumentParser(description='Train a editor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape', type=int, nargs='+', default=[250, 250],
        help='input image size')
    return parser.parse_args()
class FunctionProfile(object):
profiling = False
def __init__(self, fn, condition=None, profile_class=cProfile.Profile, print_freq=0, sort_keys=None, print_restrictions=None):
self.fn = fn
if (condition is None):
condition = _null_condition
self.condition = condition
... |
def haverkamp(mesh, **kwargs):
    """Split *kwargs* between the Haverkamp conductivity and water-retention
    models and return the partitioned pair for *mesh*."""
    conductivity_params = ['Ks', 'A', 'gamma']
    retention_params = ['alpha', 'beta', 'theta_r', 'theta_s']
    return _partition_args(
        mesh, Haverkamp_k, Haverkamp_theta,
        conductivity_params, retention_params, **kwargs)
def main():
args = get_args()
out = args.out_dir
all_h_shifts = args.all_h_shifts
num_workers = args.num_workers
dag_folder = Path(args.dag_folder)
dag_files = list(dag_folder.glob('*.json'))
if (out is None):
out = (dag_folder.parent / f'{dag_folder.stem}_subform.p')
out = Path(... |
class Seg(object):
def __init__(self, prefix_set):
self._prefix_set = prefix_set
def cut(self, text):
remain = text
while remain:
matched = ''
for index in range(len(remain)):
word = remain[:(index + 1)]
if (word in self._prefix_set... |
def export_torchscript_with_instances(model, fields):
    """Script *model* with the Instances class patched to expose *fields*.

    The RPN topk annotations are pinned to ``Dict[int, int]`` before
    scripting so the TorchScript compiler sees concrete container types.
    """
    with patch_instances(fields):
        for key in ('pre_nms_topk', 'post_nms_topk'):
            RPN.__annotations__[key] = Dict[int, int]
        scripted = torch.jit.script(model)
    return scripted
def test_move_only_holder_with_addressof_operator():
    """A holder type exposing operator& must still track object lifetime."""
    obj = m.TypeForMoveOnlyHolderWithAddressOf.make()
    obj.print_object()
    stats = ConstructorStats.get(m.TypeForMoveOnlyHolderWithAddressOf)
    assert stats.alive() == 1
    # The held value stays writable through the holder.
    obj.value = 42
    assert obj.value == 42
    # Dropping the last reference must destroy the wrapped instance.
    del obj
    assert stats.alive() == 0
def test_silly_stuff():
a = ak.highlevel.Array([[0, 1, 2], 3]).layout
b = [[2], [0]]
with pytest.raises(IndexError):
a[b]
a = ak.highlevel.Array([[0, 1, 2], [3, 4], [5, 6], [7]]).layout
b = ak.highlevel.Array([[0, 2], None, [1], None]).layout
assert (to_list(a[b]) == [[0, 2], None, [6], ... |
class DiagonalGaussianDistribution(object):
def __init__(self, parameters, deterministic=False):
self.parameters = parameters
(self.mean, self.logvar) = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, (- 30.0), 20.0)
self.deterministic = deterministic
... |
class V0LayerParameter(_message.Message):
    """Protobuf message class synthesised at runtime by the protobuf
    reflection metaclass from the ``_V0LAYERPARAMETER`` descriptor;
    presumably the legacy (V0) layer parameters, per the descriptor name —
    verify against the .proto source.
    """
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V0LAYERPARAMETER
def build_rosenbrock_function(a: float=1, b: float=100):
var = Variable(2)
x_var = var[0]
y_var = var[1]
x_minus_a = Sum([x_var, Constant((- a))])
y_minus_x2 = Sum([y_var, Product([Constant((- 1)), x_var, x_var])])
obj = Sum([Product(([x_minus_a] * 2)), Product([Constant(b), Product(([y_minus_x2... |
def format_dates(_ids):
    """Convert each month id in *_ids* to its display string via
    ``num2str_month`` and return the list."""
    return [num2str_month(month_id) for month_id in _ids]
class XCLIPVideoModule(nn.Module):
def __init__(self, model_name_or_path: str):
super().__init__()
self.model_name_or_path = model_name_or_path
self.model = None
self.processor = None
self.load_model()
def load_model(self):
self.model = AutoModel.from_pretrained(s... |
def get_transforms():
    """Return the standard eval pipeline: resize to 224x224, convert to a
    tensor, and normalise every channel to mean 0.5 / std 0.5."""
    steps = [
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ]
    return transforms.Compose(steps)
(scope='module')
def english_model():
    """Locate a pretrained English lemma model under TEST_MODELS_DIR and
    return a Trainer loaded from the first match."""
    pattern = os.path.join(TEST_MODELS_DIR, 'en', 'lemma', '*')
    candidates = glob.glob(pattern)
    assert len(candidates) >= 1
    return trainer.Trainer(model_file=candidates[0])
('/save', methods=['GET', 'POST'])
def save_file():
    """Endpoint handler that acknowledges a save request with a JSON
    success payload."""
    return jsonify({'status': 'success'})
def main(argv):
args = parse_args(argv)
utils.general_setup(args.save, args.gpus)
logging.info('Arguments parsed.\n{}'.format(pprint.pformat(vars(args))))
val_loader = imagenet.get_val_loader(args.imagenet, args.batch_size, args.num_workers)
(model, loss) = model_factory.create_model(args.model, arg... |
class SoftmaxBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark harness timing an op (built by ``op_func``) applied to a
    random NCHW tensor."""

    def init(self, N, C, H, W, device, op_func):
        # Instantiate the op and build the random input once, up front, so
        # forward() measures only the op call itself.
        self.op_func = op_func()
        self.input_one = torch.rand(N, C, H, W, device=device)

    def forward(self):
        return self.op_func(self.input_one)
_utils.test(debug=True)
def test_ternary_op_cond_is_scalar():
def test():
x = ti.Vector([3, 3, 3])
y = ti.Vector([5, 5, 5])
for i in range(10):
z = ti.select((i % 2), x, y)
if ((i % 2) == 1):
assert ((z[0] == x[0]) and (z[1] == x[1]) and (z[2] == x[2])... |
class VocabItem():
    """A single vocabulary entry: its surface string plus bookkeeping slots
    (occurrence count, Huffman path/code, optional precomputed hash)."""

    def __init__(self, string, hash=None):
        # `hash` shadows the builtin, but the keyword name is part of the
        # public interface so it is kept.
        self.string = string
        self.count = 0
        self.path = None
        self.code = None
        self.hash = hash

    def __str__(self):
        return f'VocabItem({self.string})'

    def __repr__(self):
        return str(self)
class Block(nn.Module):
def __init__(self, dim, mlp_ratio=4, dpr=0.0, norm_layer=nn.BatchNorm2d, use_norm=True):
super().__init__()
self.norm1 = (norm_layer(dim) if use_norm else nn.Identity())
self.attn = PATM(dim)
self.drop_path = (DropPath(dpr) if (dpr > 0.0) else nn.Identity())
... |
class BasicBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, ker... |
def default_fields(sample, prediction):
padding = torch.zeros(sample.pos.shape[0])
padding[sample.mask] = 1.0
label = sample.y
fields = {'prediction': prediction, 'label': label, 'error': (label - prediction), 'normals': sample[('normal' if ('normal' in sample) else 'norm')], 'geodesics': sample.geo, 'p... |
class CorrectionBox():
types = enum(TO_CORRECT=1, TO_REVIEW=2, RESOLVED=3, QUESTION=4)
def __init__(self, rect=None, annotation=''):
self.type = CorrectionBox.types.TO_CORRECT
self.bbox = rect
self.annotation = annotation
self.selected = False
return
def get_colour(se... |
class Mixed_5b(nn.Module):
def __init__(self):
super(Mixed_5b, self).__init__()
self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(BasicConv2d(192, 48, kernel_size=1, stride=1), BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2))
self.branc... |
class StringUnsField(BaseUnsField):
def validate_field(self, adata: AnnData) -> None:
super().validate_field(adata)
if (self.attr_key not in adata.uns):
raise KeyError(f'{self.attr_key} not found in adata.uns.')
def register_field(self, adata: AnnData) -> dict:
return super()... |
class AnsiCursor(object):
def UP(self, n=1):
return ((CSI + str(n)) + 'A')
def DOWN(self, n=1):
return ((CSI + str(n)) + 'B')
def FORWARD(self, n=1):
return ((CSI + str(n)) + 'C')
def BACK(self, n=1):
return ((CSI + str(n)) + 'D')
def POS(self, x=1, y=1):
retu... |
def mkdirp(dirname, overwrite=True):
try:
os.makedirs(dirname)
except OSError:
if (not os.path.isdir(dirname)):
raise
config_path = os.path.join(dirname, 'config.json')
if ((not overwrite) and os.path.lexists(config_path)):
raise OverwriteError(('%s exists... |
def RegressionHoeffdingTree(max_byte_size=, memory_estimate_period=1000000, grace_period=200, split_confidence=1e-07, tie_threshold=0.05, binary_split=False, stop_mem_management=False, remove_poor_atts=False, leaf_prediction='perceptron', no_preprune=False, nb_threshold=0, nominal_attributes=None, learning_ratio_percep... |
def _pipeline_parallel_pre_init(cfg: DistributedTrainingConfig):
from fairseq import utils
balance_exists = ((cfg.pipeline_balance is not None) or (cfg.pipeline_encoder_balance is not None) or (cfg.pipeline_decoder_balance is not None))
devices_exist = ((cfg.pipeline_devices is not None) or (cfg.pipeline_en... |
def test_instance_unit_norm_scaler():
import numpy as np
from pysad.transform.preprocessing import InstanceUnitNormScaler
X = np.random.rand(100, 25)
scaler = InstanceUnitNormScaler()
scaled_X = scaler.fit_transform(X)
assert np.all(np.isclose(np.linalg.norm(scaled_X, axis=1), 1.0))
scaler =... |
def download_file(url, DATA_DIR=DATA_DIR):
local_filename = url.split('/')[(- 1)]
local_filename = os.path.join(DATA_DIR, local_filename)
if os.path.exists(local_filename):
print(f'-I- file {local_filename} already exists, skipping download.')
return local_filename
with requests.get(url,... |
class SyncBatchNorm(Function):
def forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
if (not input.is_contiguous(memory_format=torch.channels_last)):
input = input.contiguous()
if (weight is not None):
weight = weight.con... |
def calculate_quantization_params(graph: Graph, fw_info: FrameworkInfo, nodes: List[BaseNode]=[], specific_nodes: bool=False, fw_impl: FrameworkImplementation=None):
Logger.info(f'''Running quantization parameters search. This process might take some time, depending on the model size and the selected quantization m... |
class GPTBigCodeForCausalLM(metaclass=DummyObject):
    # Import-time placeholder used when torch is not installed: any attempt
    # to instantiate it raises via requires_backends with an installation
    # hint instead of failing with an opaque ImportError.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def kl(p_logit, q_logit):
    """Dispatch KL divergence: the binary formulation when the logits have
    a single channel (``shape[1] == 1``), the categorical one otherwise."""
    is_binary = p_logit.shape[1] == 1
    divergence = kl_binary if is_binary else kl_categorical
    return divergence(p_logit, q_logit)
class BlockGraphView(object):
def nodes(self) -> List[NodeT]:
...
def edges(self) -> List[EdgeT]:
...
def in_degree(self, node: NodeT) -> int:
...
def out_degree(self, node: NodeT) -> int:
...
def sdfg(self) -> 'SDFG':
...
def all_nodes_recursive(self) -> ... |
def arc_distance(theta_1: dace.float64[N], phi_1: dace.float64[N], theta_2: dace.float64[N], phi_2: dace.float64[N]):
temp = ((np.sin(((theta_2 - theta_1) / 2)) ** 2) + ((np.cos(theta_1) * np.cos(theta_2)) * (np.sin(((phi_2 - phi_1) / 2)) ** 2)))
distance_matrix = (2 * np.arctan2(np.sqrt(temp), np.sqrt((1 - tem... |
class CustomGradientDescentOptimizer(BaseCustomOptimizer):
    """Plain SGD step: ``var <- var - lr * grad`` (sparse when *indices*
    is given)."""

    def _apply(self, grad, var, indices=None):
        # Cast to the gradient's base dtype so the multiply is well-typed.
        learning_rate = tf.cast(self._learning_rate_tensor, grad.dtype.base_dtype)
        update = learning_rate * grad
        return self._assign_sub(ref=var, updates=update, indices=indices).op
class ClanaCfg():
def read_clana_cfg(cls, cfg_file):
if os.path.isfile(cfg_file):
with open(cfg_file) as stream:
cfg = yaml.safe_load(stream)
else:
cfg = {'version': clana.__version__, 'data': {}}
return cfg
def get_cfg_path_from_cm_path(cls, cm_fi... |
class DerivAdjoint_J(Base_DerivAdjoint_Test):
formulation = 'CurrentDensity'
if testDeriv:
def test_Jvec_j_jy(self):
self.JvecTest('CurrentDensityy')
def test_Jvec_j_dhdtx(self):
self.JvecTest('MagneticFieldTimeDerivativex')
def test_Jvec_j_dhdtz(self):
... |
def save_to_folder(filename: str, output: dict, folder: str):
    """Persist synthesis results under *folder* (created if needed): the mel
    spectrogram as a .npy file and the waveform as 24-bit PCM at 22050 Hz.

    NOTE(review): the ``f'(unknown)'`` literals look like corrupted
    placeholders (perhaps originally ``{filename}``) — verify against the
    upstream source before relying on the output file names.
    """
    target_dir = Path(folder)
    target_dir.mkdir(exist_ok=True, parents=True)
    mel = output['mel'].cpu().numpy()
    np.save(target_dir / f'(unknown)', mel)
    sf.write(target_dir / f'(unknown).wav', output['waveform'], 22050, 'PCM_24')
def lengths_to_attention_mask(lengths: Tensor, left_context: Optional[int]=None, right_context: Optional[int]=None) -> Optional[Tensor]:
if ((left_context is None) and (right_context is None)):
return None
max_length = int(torch.max(lengths).item())
indices = (torch.arange(max_length, device=lengths... |
class BaseRingLift(Morphism):
    """Morphism lifting an element of the base ring into the codomain as a
    constant term (coefficient attached to the unit index)."""

    def _call_(self, x):
        codomain = self.codomain()
        base_ring = codomain.base_ring()
        # Build the term on the unit element of the codomain's index set,
        # with x coerced into the base ring as its coefficient.
        return codomain.term(codomain.indices().one(), base_ring(x))
def broadcast(tensor, src, group=group.WORLD):
    """Broadcast *tensor* from rank *src* to every process in *group* using
    the deprecated process-group distributed backend."""
    message = 'collective only supported in process-group mode'
    assert torch.distributed.deprecated._initialized == _INITIALIZED_PG, message
    return torch._C._dist_broadcast(tensor, src, group)
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
cls.add_constructor([param('double', '_x'), param('double', '_y')])
cls.add_constructor([])
cls.add_instance_attribute('x', 'double', is_const=False)
... |
_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
def setUp(self):
vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
vocab_tokens = dict(zip(vocab, range(len(vocab))))
self.add_kwargs_tokens_map = {'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
... |
def load_time_data(logdir: os.PathLike, jobtype: str) -> pd.DataFrame:
assert (jobtype in {'train', 'eval', 'hmc'})
fpaths = Path(logdir).rglob(f'step-timer-{jobtype}')
data = {}
for (idx, fpath) in enumerate(fpaths):
tdata = pd.read_csv(fpath)
data[f'{idx}'] = tdata
return pd.DataFr... |
def main(unused_argv):
    # absl/tf.app-style entry point: argv is ignored; configuration comes
    # from the module-level FLAGS.
    default_hparams = create_hparams(FLAGS)
    # Run with the evaluation callback wired in.
    run_main(FLAGS, default_hparams, eval_fn)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.