code stringlengths 101 5.91M |
|---|
def fast_deepcopy(value: Any) -> Any:
    """Recursively copy nested dicts and lists.

    Only dict and list containers are duplicated; every other value
    (including tuples and scalars) is returned as-is, i.e. shared.
    """
    if isinstance(value, list):
        return [fast_deepcopy(item) for item in value]
    if isinstance(value, dict):
        return {key: fast_deepcopy(item) for key, item in value.items()}
    return value
.parametrize('model_name', ['facebook/opt-125m', 'facebook/opt-350m', 'facebook/opt-1.3b'])
def test_opt_optimized(model_name):
dtype = torch.float16
device = 'cuda'
config = opt_config_to_gpt2_config(OPTConfig.from_pretrained(model_name))
config.use_flash_attn = True
config.fused_bias_fc = True
... |
def print_expression_tree(expr: sf.Expr) -> None:
    """Print a condensed sympy expression tree.

    Keeps only the root line (index 0) and branch lines (those containing
    '+-'), discarding the rest of sympy's verbose tree dump.
    """
    from sympy.printing.tree import tree

    all_lines = tree(expr).split('\n')
    kept = [line for idx, line in enumerate(all_lines) if idx == 0 or '+-' in line]
    print('\n'.join(kept))
class BatchDatset():
files = []
images = []
annotations = []
image_options = {}
batch_offset = 0
epochs_completed = 0
def __init__(self, records_list, image_options={}):
print('Initializing Batch Dataset Reader...')
print(image_options)
self.files = records_list
... |
def cook_refs(refs, n=4):
    """Preprocess reference sentences for n-gram overlap scoring.

    Returns a pair: the list of (normalized) reference lengths, and a dict
    mapping each n-gram (orders 1..n) to its maximum count over all refs.
    """
    refs = [normalize(ref) for ref in refs]
    maxcounts = {}
    for ref in refs:
        for ngram, count in count_ngrams(ref, n).items():
            # Keep the largest count seen for this n-gram across references.
            if count > maxcounts.get(ngram, 0):
                maxcounts[ngram] = count
    return [len(ref) for ref in refs], maxcounts
class TensorSchemaBuilder():
def __init__(self) -> None:
self._tensor_schema: Dict[(str, TensorFeatureInfo)] = {}
def categorical(self, name: str, cardinality: int, is_seq: bool=False, feature_source: Optional[TensorFeatureSource]=None, feature_hint: Optional[FeatureHint]=None, embedding_dim: Optional[i... |
class SawyerCoffeePushV1Policy(Policy):
_fully_parsed
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'mug_pos': obs[3:6], 'goal_xy': obs[9:11], 'unused_info': obs[[6, 7, 8, 11]]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'gra... |
def pickle_data(file_name, *args):
    """Pickle each of *args to file_name, preceded by the number of objects.

    Stream layout: len(args) first, then each item in order, all dumped with
    protocol 2 (the original's choice, kept for compatibility with existing
    readers).

    Fix: the original opened the file without a context manager, leaking the
    handle if any dump raised; 'with' guarantees the file is closed.
    """
    with open(file_name, 'wb') as out_file:
        cPickle.dump(len(args), out_file, protocol=2)
        for item in args:
            cPickle.dump(item, out_file, protocol=2)
class FilterPrep():
def __init__(self, input_file_dir='SORE/data/OpenIE/inputs/', output_dir='SORE/data/filter_data/'):
self.input_file_dir = input_file_dir
self.output_dir = output_dir
def determine_output_name(self, prefix='test', SUBWORDUNIT=True, STEMMING=False, STOPWORDS=False):
if ... |
def get_stats():
dataset_stats = {'Name': [], '#Cat.': [], '#Num.': [], '#Text': [], 'Problem Type': [], '#Train': [], '#Test': [], '#Competition': [], 'Metric': []}
train_dataset_l = []
test_dataset_l = []
for dataset_name in dataset_registry.list_keys():
print('Processing:', dataset_name)
... |
def model_and_diffusion_defaults():
res = dict(image_size=64, num_channels=128, num_res_blocks=2, num_heads=4, num_heads_upsample=(- 1), num_head_channels=(- 1), attention_resolutions='16,8', channel_mult='', dropout=0.0, class_cond=False, use_checkpoint=False, use_scale_shift_norm=True, resblock_updown=False, use_... |
class ArgumentRwTexture(Argument):
    """Kernel argument describing a read-write texture.

    Stores the texel format and the number of texture dimensions alongside
    the base Argument's name.
    """

    def __init__(self, name: Optional[str], fmt: ti.Format, ndim: int):
        super().__init__(name)
        self.ndim: int = ndim
        self.fmt: ti.Format = fmt
def recursive_detach(x):
    """Detach a tensor, or every tensor inside a (possibly nested) list/tuple.

    The exact-type checks (``type(x) is ...``) are deliberate and preserved
    from the original: subclasses such as namedtuples are NOT rebuilt
    element-wise and fall through to the ``.detach()`` call.

    Idiom fix: ``tuple(generator)`` instead of the original's redundant
    ``tuple([list comprehension])`` — same result, no throwaway list.
    """
    if type(x) is tuple:
        return tuple(recursive_detach(item) for item in x)
    if type(x) is list:
        return [recursive_detach(item) for item in x]
    # Anything else is assumed to expose .detach() (tensor-like).
    return x.detach()
class NotConvergedError(CashocsException):
def __init__(self, solver: str, message: Optional[str]=None) -> None:
super().__init__()
self.solver = solver
self.message = message
def __str__(self) -> str:
main_msg = f'The {self.solver} failed to converge.'
post_msg = (f'''
{... |
class SingleInvaderSpaceInvadersWorld(SpaceInvadersWorld):
invader_class = CrossScreenMovingInvader
invaders_per_row = 1
parameters_invader_missile = {'class': InvaderMissile, 'fire_rate': 10, 'max_missiles': 20, 'missile_impulse': 100}
def initial_invader_row(self):
return (self._height - 200)
... |
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
import torch
flax_state_dict = {}
for shard_file in shard_filenames:
pt_state_dict = torch.load(shard_file)
pt_state_dict = {k: v.numpy() for (k, v) in pt_state_dict.items()}
model_prefix = flax_model.base_m... |
def patch_snakemake_cache(zenodo_doi, sandbox_doi):
logger = get_logger()
output_file_cache = snakemake.workflow.workflow.output_file_cache
if (output_file_cache is not None):
if (zenodo_doi is not None):
zenodo = Zenodo(zenodo_doi)
else:
zenodo = None
if (san... |
def register_Ns3OlsrMessageHeaderHello_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::olsr::MessageHeader::Hello const &', 'arg0')])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
cls.add_method(... |
def resnet50(pretrained=False, progress=True, **kwargs):
    """Build a ResNet-50 via the project's _resnet factory.

    Args:
        pretrained: whether _resnet loads pretrained weights.
        progress: whether _resnet shows a download progress bar.
        **kwargs: forwarded to _resnet; may include 'num_classes'.

    Returns:
        The constructed model, with its fc head rebuilt to
        nn.Linear(2048, num_classes) when 'num_classes' is supplied.
    """
    model = _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
    # Bug fix: the original unconditionally indexed kwargs['num_classes'],
    # raising KeyError for a plain resnet50() call. Only rebuild the head
    # when the caller actually asked for a custom class count.
    if 'num_classes' in kwargs:
        model.fc = nn.Linear(2048, kwargs['num_classes'])
    return model
def preprocess(sources: Sequence[str], targets: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
examples = [(s + t) for (s, t) in zip(sources, targets)]
(examples_tokenized, sources_tokenized) = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
input_ids = examples_t... |
class LocalStd(LocalAffinity):
def _init_aff(self):
weight = torch.zeros(9, 1, 3, 3)
weight.zero_()
weight[(0, 0, 0, 0)] = 1
weight[(1, 0, 0, 1)] = 1
weight[(2, 0, 0, 2)] = 1
weight[(3, 0, 1, 0)] = 1
weight[(4, 0, 1, 1)] = 1
weight[(5, 0, 1, 2)] = 1
... |
def test_analyse_module_dependencies(parsed_module_complex_dependencies):
    """A module with complex dependencies must yield exactly one accessible
    object under test, three generators, and one modifier."""
    cluster = analyse_module(parsed_module_complex_dependencies)
    assert cluster.num_accessible_objects_under_test() == 1
    assert len(cluster.generators) == 3
    assert len(cluster.modifiers) == 1
def download_bravura_font(overwrite: bool=False):
if ((not overwrite) and get_bravura_font_path().is_file()):
print('Skip downloading as the Bravura font is found.')
return
get_bravura_font_dir().mkdir(parents=True, exist_ok=True)
print('Start downloading Bravura font.')
prefix = '
u... |
class BimodalTrafficMatrix(TrafficMatrix):
def __init__(self, problem, tm, fraction, low_range, high_range, seed=0, scale_factor=1.0):
assert (0.0 <= low_range[0] < low_range[1] < high_range[0] < high_range[1])
self._fraction = fraction
self._low_range = low_range
self._high_range = ... |
class OperatorEngine(object):
def __init__(self, name):
self.op_name = name
(self.base_op_name, self.engine) = name.split('_ENGINE_', 1)
def getDeviceImpl(self):
deviceImplList = []
for (device, impl) in [('CPU', OpSchema.get_cpu_impl(self.op_name)), ('CUDA', OpSchema.get_cuda_im... |
class ConjugateGradientOptimizer(Serializable):
def __init__(self, cg_iters=10, reg_coeff=1e-05, subsample_factor=1.0, backtrack_ratio=0.8, max_backtracks=15, accept_violation=False, hvp_approach=None, num_slices=1):
Serializable.quick_init(self, locals())
self._cg_iters = cg_iters
self._reg... |
()
('--seed', default=1)
('--max_path_length', default=150)
('--meta_batch_size', default=10)
('--n_epochs', default=10)
('--episode_per_task', default=10)
_experiment
def rl2_ppo_metaworld_ml10_meta_test(ctxt, seed, max_path_length, meta_batch_size, n_epochs, episode_per_task):
set_seed(seed)
with LocalTFRunne... |
class SigmoidPow(torch.autograd.Function):
def forward(ctx, x: torch.Tensor, y: torch.Tensor):
logsigmoid = F.softplus((- x)).neg()
out = logsigmoid.mul(y).exp()
ctx.save_for_backward(out, logsigmoid, x, y)
return out
def backward(ctx, gout: torch.Tensor):
(out, logsigmoi... |
def local_dsprites_parser():
parser = dsprites_parser()
parser.add_argument('--rseed', default=0, help='random seed', type=int)
parser.add_argument('--beta_min', default=0.1, help='min value for +beta*kl_cost', type=float)
parser.add_argument('--beta_max', default=10.0, help='max value for +beta*kl_cost... |
def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32):
    """Create a fixed 3x3 sparse COO tensor with three nonzero entries.

    Entries: (0,2)=3.2, (1,0)=4.1, (1,2)=5.3. The result is coalesced only
    when requested.
    """
    indices = [[0, 1, 1], [2, 0, 2]]
    values = [3.2, 4.1, 5.3]
    result = torch.sparse_coo_tensor(indices, values, (3, 3), requires_grad=requires_grad, dtype=dtype)
    return result.coalesce() if coalesce else result
def startswith(list_, prefix):
    """Return True if the sequence list_ begins with the sequence prefix.

    Simplification: when prefix is longer than list_, the slice
    list_[:len(prefix)] is just list_ itself, which then compares unequal to
    the longer prefix — so the original's separate length guard was redundant
    and has been removed. An empty prefix matches any sequence.
    """
    return list_[:len(prefix)] == prefix
class Factor():
def __init__(self, domain, values):
assert (domain.size() == values.size), 'domain size does not match values size'
assert ((values.ndim == 1) or (values.shape == domain.shape)), 'invalid shape for values array'
self.domain = domain
self.values = values.reshape(domain... |
class BatchMapping(Mapping):
__metaclass__ = ABCMeta
def get_batch(self, keys):
pass
def __getitem__(self, key):
val = self.get_batch([key])[0]
if isinstance(val, Failure):
raise KeyError(key)
return val
def contains_batch(self, keys):
pass
def __c... |
def default_mini_imagenet_loading_transform(image_size: int) -> Callable:
    """Resize to a square of twice the target size, then convert to tensor."""
    side = int(image_size * 2.0)
    return transforms.Compose([transforms.Resize([side, side]), transforms.ToTensor()])
def update_arguments(args):
if (args.infer or args.verify or args.word_label):
args.small = True
if (not args.train):
args.batch_size *= 30
if args.cpu:
args.device = 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
else:
args.device = torch.device(('cuda' if torch... |
class Task(Enum):
    """Supported evaluation tasks; each member's value is its dataset id.

    The original annotated every member with ': str'; Enum ignores such
    annotations when a value is assigned, so plain assignments create the
    exact same members.
    """
    xnli = 'xnli'
    pawsx = 'pawsx'
    mldoc = 'mldoc'
    langid = 'langid'
    conllner = 'ner-conll'
    wikiner = 'ner-wiki'
    udpos = 'udpos'
    parsing = 'parsing'
    alignment = 'alignment'
def build_sqa_zero_dataset(folder, template_files):
os.makedirs(folder, exist_ok=True)
table_processor = get_codex_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='gpt2')
eval_dataset = load_dataset('msr_sqa', split='test')
prompt_files = template_files
for prompt_file in promp... |
def scale_dimension(dim, scale):
    """Scale a spatial dimension as (dim - 1) * scale + 1.

    Handles both tf.Tensor inputs (returning an int32 tensor) and plain
    numbers (returning a Python int).
    """
    if isinstance(dim, tf.Tensor):
        scaled = (tf.cast(dim, tf.float32) - 1.0) * scale + 1.0
        return tf.cast(scaled, dtype=tf.int32)
    return int((float(dim) - 1.0) * scale + 1.0)
class PartitionAlgebra_ak(PartitionAlgebra_generic):
def __init__(self, R, k, n, name=None):
if (name is None):
name = ('Partition algebra A_%s(%s)' % (k, n))
cclass = SetPartitionsAk(k)
self._element_class = PartitionAlgebraElement_ak
PartitionAlgebra_generic.__init__(se... |
def register_Ns3EnergyHarvester_methods(root_module, cls):
cls.add_constructor([param('ns3::EnergyHarvester const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetEnergySource', 'ns3::Ptr< ns3::EnergySource >', [], is_const=True)
cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=Tr... |
class BaseMultiDataLoader():
def __init__(self, dataloaders):
self.dataloaders = dataloaders
self.batch_size = self.dataloaders[0].batch_size
def __getitem__(self, item):
dl_idx = (item % len(self.dataloaders))
return next(iter(self.dataloaders[dl_idx]))
def __len__(self):
... |
def length_to_mask(length, max_len=None, dtype=None, device=None):
assert (len(length.shape) == 1)
if (max_len is None):
max_len = length.max().long().item()
mask = (torch.arange(max_len, device=length.device, dtype=length.dtype).expand(len(length), max_len) < length.unsqueeze(1))
if (dtype is N... |
class LpRegularizer(BaseRegularizer):
def __init__(self, p=2, power=1, **kwargs):
super().__init__(**kwargs)
self.p = p
self.power = power
self.add_to_recordable_attributes(list_of_names=['p', 'power'], is_stat=False)
def compute_loss(self, embeddings):
reg = torch.norm(e... |
def Klein7RegularGraph():
    """Return the Klein 7-regular graph on 24 vertices.

    Built from its graph6-style string encoding, then given its canonical
    circular embedding and display name.
    """
    encoding = ':W__`AaBbC_CDbDcE`F_AG__IgHIJbFGIKaFHILeFGHMdFKN_EKOPaCNPQ`HOQRcGLRS`BKMSTdJKLPTU'
    g = Graph(encoding, loops=False, multiedges=False)
    g._circle_embedding([0, 2, 3, 1, 9, 16, 20, 21, 4, 19, 17, 7, 15, 10, 8, 13, 11, 5, 23, 22, 14, 12, 18, 6])
    g.name('Klein 7-regular Graph')
    return g
def read_tsv(data_filename, meta_filename):
with open(meta_filename) as f:
column_info = f.readlines()
column_info_raw = [x.replace('{', ' ').replace('}', ' ').split() for x in column_info]
discrete = []
continuous = []
column_info = []
for (idx, item) in enumerate(column_info_raw):
... |
def test_is_fulfilled(stopping_condition):
    """With a zero limit, the condition must report fulfilled after a short wait."""
    condition = stopping_condition
    condition.reset()
    condition.set_limit(0)
    # Give the condition a moment to pass the zero limit before checking.
    time.sleep(0.05)
    assert condition.is_fulfilled()
def download(url, filename, delete_if_interrupted=True, chunk_size=4096):
try:
with open(filename, 'wb') as f:
print('Downloading {} > {}'.format(url, filename))
response = requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if... |
class RunnerInterface_0_0_8(CommandLineArgsRunnerInterface):
def version() -> StrictVersion:
return StrictVersion('0.0.8')
def changelog() -> str:
return 'Pass dependency classpath to detectors as `dep_classpath`.'
def _get_supported_cli_args():
return ['target', 'run_info', 'detecto... |
def deconv(x, channels, kernel=3, stride=2, use_bias=True, scope='deconv_0'):
with tf.variable_scope(scope):
x = tf.layers.conv2d_transpose(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, strides=stride, use_bias=use_bias, padding='SAME'... |
class LoadArffFilesCases(FixtureParameterFactory):
def arff_1(self):
return StringIO(ARFF_CATEGORICAL_INDEX_1)
def arff_2(self):
return StringIO(ARFF_CATEGORICAL_INDEX_2)
def data_with_categorical_index_1(self):
values = ['SampleOne', 'SampleTwo', 'SampleThree', 'SampleFour']
... |
class LossHistogramsMatching(nn.Module):
def __init__(self):
super(LossHistogramsMatching, self).__init__()
self.measure = _JSD()
def forward(self, trg_his, src_his):
assert (trg_his.ndim == 2), "'trg_his' must have 2 dims. found {}.".format(trg_his.ndim)
assert (src_his.ndim == ... |
def simAddDrawingObject(objectType, size, duplicateTolerance, parentObjectHandle, maxItemCount, ambient_diffuse=None, specular=None, emission=None):
if (ambient_diffuse is None):
ambient_diffuse = ffi.NULL
if (specular is None):
specular = ffi.NULL
if (emission is None):
emission = f... |
def runtime_validation(f):
if (f.__name__ != '__iter__'):
raise TypeError("Can not decorate function {} with 'runtime_validation'".format(f.__name__))
(f)
def wrapper(self):
global _runtime_validation_enabled
if (not _runtime_validation_enabled):
(yield from f(self))
... |
class Market1501(ImageDataset):
_junk_pids = [0, (- 1)]
dataset_dir = 'market1501'
dataset_url = '
def __init__(self, root='', market1501_500k=False, **kwargs):
self.root = osp.abspath(osp.expanduser(root))
self.dataset_dir = '/data/syh/datasets/STREET/ReID_downsample/'
self.data... |
class TestSetState(object):
def setup(self):
self.seed =
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'], self.state['state']['key'], self.state['state']... |
def test_asdict_as_float() -> None:
    """asdict_as_float keeps plain floats as-is and converts tensors to numpy."""
    tensor_b = torch.rand([], dtype=torch.float32)
    result = asdict_as_float(D2(a=1.0, b=tensor_b))
    assert result['a'] == 1.0
    assert result['b'] == tensor_b.numpy()
class MagneticField():
SIZE = 6
def from_reader(reader: _ResponseReader):
assert (reader.remaining() >= MagneticField.SIZE)
rv = MagneticField()
rv.x = reader.read_bytes(2)
rv.y = reader.read_bytes(2)
rv.z = reader.read_bytes(2)
return rv
def __repr__(self):
... |
def draw_strokes(data, factor=1, svg_filename='sample.svg'):
(min_x, max_x, min_y, max_y) = get_bounds(data, factor)
dims = (((50 + max_x) - min_x), ((50 + max_y) - min_y))
dwg = svgwrite.Drawing(svg_filename, size=dims)
dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
lift_pen = 1
abs_... |
def register_Ns3RandomRectanglePositionAllocator_methods(root_module, cls):
cls.add_constructor([param('ns3::RandomRectanglePositionAllocator const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
cls.add_method('GetNext', 'n... |
def load_yaml(path: Union[(str, Path, TextIO)], compressed: bool=None) -> Music:
if isinstance(path, (str, Path)):
if (compressed is None):
if str(path).lower().endswith('.gz'):
compressed = True
else:
compressed = False
if compressed:
... |
class Market1501(BaseImageDataset):
dataset_dir = 'market1501'
def __init__(self, root='data', verbose=True, **kwargs):
super(Market1501, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'train')
self.val_dir = osp.j... |
.parametrize('create_solver', ss.solvers.values())
def test_kind(create_solver):
s = create_solver(False)
s.set_opt('produce-models', 'true')
s.set_opt('incremental', 'true')
(prop, ts) = build_simple_alu_fts(s)
kind = pono.KInduction(prop, ts, s)
res = kind.check_until(1)
assert (res is Tru... |
def process_img(args, src_image_root, dst_image_root):
(img_idx, img_info, anns) = args
src_img = Image.open(osp.join(src_image_root, img_info['file_name']))
labels = []
for (ann_idx, ann) in enumerate(anns):
attrs = ann['attributes']
text_label = attrs['transcription']
if ((not ... |
def setup_seed(seed):
    """Seed every RNG in use (random, numpy, torch CPU/CUDA) and force
    deterministic cudnn behavior for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cudnn autotuning for run-to-run determinism.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # NOTE(review): setting PYTHONHASHSEED after interpreter start does not
    # change this process's hash randomization — it only reaches child
    # processes that inherit the environment. Kept for that purpose.
    os.environ['PYTHONHASHSEED'] = str(seed)
def execution_results(query, username, password, timeout=3):
connection = pymysql.connect(user=username, password=password)
class TimeoutException(Exception):
pass
def timeout_handler(signum, frame):
raise TimeoutException
signal.signal(signal.SIGALRM, timeout_handler)
syntactic = Tr... |
class TextLoggerHook(LoggerHook):
def __init__(self, interval=10, ignore_last=True, reset_flag=False):
super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)
self.time_sec_tot = 0
def before_run(self, trainer):
super(TextLoggerHook, self).before_run(trainer)
self... |
def error_analysis(sp):
dataset = data_loader.load_processed_data(args)
dev_examples = dataset['dev']
sp.schema_graphs = dataset['schema']
print('{} dev examples loaded'.format(len(dev_examples)))
if (len(ensemble_model_dirs) <= 2):
print('Needs at least 3 models to perform majority vote')
... |
def test_rollout(env, policy):
worker = VecWorker(seed=SEED, max_path_length=MAX_PATH_LENGTH, worker_number=0, n_envs=N_TRAJ)
worker.update_agent(policy)
worker.update_env(env)
traj = worker.rollout()
assert (len(traj.lengths) == N_TRAJ)
traj2 = worker.rollout()
assert (len(traj2.lengths) ==... |
class Trainer():
def __init__(self, model):
self.model = model
self.opt = (model.module.opt if isinstance(model, nn.parallel.DistributedDataParallel) else model.opt)
self.start_epoch = (self.opt.start_epoch if self.opt.start_epoch else 1)
self.lr = self.opt.learning_rate
self... |
class HardEMQuantizer(nn.Module):
def __init__(self, config, num_embeddings, embedding_dim, split):
super().__init__()
self.K = num_embeddings
self.D = embedding_dim
self.M = split
self.relax = config.em.relax
self.embeddings = nn.Parameter(torch.randn(self.M, self.K,... |
def test_centernet_head_get_bboxes():
s = 256
img_metas = [{'img_shape': (s, s, 3), 'scale_factor': np.array([1.0, 1.0, 1.0, 1.0]), 'pad_shape': (s, s, 3), 'batch_input_shape': (s, s), 'border': (0, 0, 0, 0), 'flip': False}]
test_cfg = ConfigDict(dict(topk=100, local_maximum_kernel=3, max_per_img=100))
... |
def default_compute_objective(metrics: Dict[str, float]) -> float:
    """Default objective for hyperparameter search.

    Removes 'eval_loss' plus the bookkeeping keys 'epoch' and 'total_flos'
    from the dict IN PLACE. Returns eval_loss when nothing else remains,
    otherwise the sum of the remaining metric values.
    """
    loss = metrics.pop('eval_loss', None)
    metrics.pop('epoch', None)
    metrics.pop('total_flos', None)
    if metrics:
        return sum(metrics.values())
    return loss
class GradedModulesCategory(RegressiveCovariantConstructionCategory, Category_over_base_ring):
def __init__(self, base_category):
super().__init__(base_category, base_category.base_ring())
_functor_category = 'Graded'
def _repr_object_names(self):
return 'graded {}'.format(self.base_category... |
def gen_template_head(args, set_default=True):
    """Emit a C++ 'template <...>' header string for the given template args."""
    parts = ['template <\n', gen_template_args(args, set_default), '>\n']
    return ''.join(parts)
def calc_mrr(testfile, predfile, resfile):
with open(testfile, 'r') as ftest, open(predfile, 'r') as fpred, open(resfile, 'w') as fout:
data = []
pred = []
n = 0
for line in ftest:
data.append(line.strip().split('\t'))
for line in fpred:
try:
... |
class MotNet(nn.Module):
def __init__(self, num_classes):
super(MotNet, self).__init__()
print('MotNet...')
self.num_classes = num_classes
self.hidden_dim = 64
self.net = nn.Sequential(nn.Conv3d(in_channels=hyp.feat3D_dim, out_channels=self.hidden_dim, kernel_size=3, padding=... |
class Zacharov(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 5.0)] * self.N), ([10.0] * self.N)))
self.custom_bounds = ([(- 1), 1], [(- 1), 1])
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.... |
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
    """Raise FileNotFoundError unless filename is an existing regular file."""
    if osp.isfile(filename):
        return
    raise FileNotFoundError(msg_tmpl.format(filename))
def perm_mh(m, h):
    """Permanent of the m x (m+h) banded 0/1 matrix with A[i][j] = 1
    exactly when i <= j <= i + h."""
    n = m + h
    A = MatrixSpace(ZZ, m, n)(0)
    for i in range(m):
        # Only columns i..i+h are ones; i+h <= (m-1)+h = n-1 stays in range.
        for j in range(i, i + h + 1):
            A[(i, j)] = 1
    return A.permanent()
class REGC(torch.nn.Module):
def __init__(self, hidden_channels, num_layers, dropout, use_egc=True, egc_heads=8, egc_bases=4):
super(self).__init__()
node_types = list(NUM_NODES_DICT.keys())
self.embs = ParameterDict({key: Parameter(torch.Tensor(NUM_NODES_DICT[key], IN_FEATURES)) for key in ... |
def cmidd(x, y, z):
    """Conditional mutual information I(x; y | z) for discrete variables,
    computed from joint entropies as H(y,z) + H(x,z) - H(x,y,z) - H(z)."""
    h_yz = entropyd(zip(y, z))
    h_xz = entropyd(zip(x, z))
    h_xyz = entropyd(zip(x, y, z))
    return h_yz + h_xz - h_xyz - entropyd(z)
class SokobanProblem(Problem):
_tile_types = ['empty', 'solid', 'player', 'crate', 'target']
def __init__(self, cfg: Config):
super().__init__(cfg)
self._width = 5
self._height = 5
self._prob = {'empty': 0.45, 'solid': 0.4, 'player': 0.05, 'crate': 0.05, 'target': 0.05}
s... |
def getdataloader(datatype, train_db_path, test_db_path, batch_size, workers=4):
(transform_train, transform_test) = _getdatatransformsdb(datatype=datatype.lower().split('+')[0].split('_')[0])
n_classes = 0
if (datatype.lower().split('+')[0] == 'imagenet32'):
print('Using resized imagenet(32 * 32)')... |
def get_optimizer(params, lr=0.001):
    """Build an AdamW optimizer with AMSGrad and the project's default
    hyperparameters (betas=(0.9, 0.999), eps=1e-06)."""
    return torch.optim.AdamW(params, lr=lr, betas=(0.9, 0.999), eps=1e-06, amsgrad=True)
def _date_convertor(r):
mls = r.split('.')
if (len(mls) == 1):
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S')
else:
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S.%f')
return new_t |
def main(args):
args.workers = int(args.workers)
args.dev_features_file = '../data/pickle-file/dev_features.pkl'
args.dev_examples_file = '../data/pickle-file/dev_examples.pkl'
args.dev_json_file = '../data/squad/my_dev.json'
args.test_features_file = '../data/pickle-file/test_features.pkl'
args... |
def IPOT(C, n, m, beta=0.5):
sigma = (torch.ones(m, 1) / m.float())
T = torch.ones([n, m])
A = torch.exp(((- C) / beta))
for t in range(50):
Q = (A * T)
for k in range(1):
delta = (1 / (n.float() * torch.matmul(Q, sigma)))
sigma = (1 / (m.float() * torch.matmul(Q.... |
def compose(*functions: Callable) -> Callable:
    """Right-to-left function composition: compose(f, g)(x) == f(g(x)).

    With no arguments, the returned callable is the identity function —
    matching the original's reduce-with-noop initializer.
    """
    def composed(x: Any) -> Any:
        for fn in reversed(functions):
            x = fn(x)
        return x
    return composed
class RPNTest(unittest.TestCase):
def test_rpn(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = 'RPN'
cfg.MODEL.ANCHOR_GENERATOR.NAME = 'DefaultAnchorGenerator'
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1)
backbone = build_backbone(cf... |
class TransformerDecoder():
def __init__(self, config, maybe_pjit, maybe_with_sharding_constraint, optimizer=None):
self.config = config
dp = thread_resources.env.shape['dp']
mp = thread_resources.env.shape['mp']
assert (mp == config['tpu_cores'])
def create_sharded_state(key... |
def p_ignorable_statement(s):
    """Parse a bare string statement (docstring-like), if present.

    Returns an ExprStatNode wrapping the string when the scanner sits on a
    BEGIN_STRING token, otherwise None.
    """
    if s.sy != 'BEGIN_STRING':
        return None
    pos = s.position()
    node = p_atom(s)
    s.expect_newline('Syntax error in string', ignore_semicolon=True)
    return Nodes.ExprStatNode(pos, expr=node)
class FloatingPointGeneric(LocalGeneric):
def is_floating_point(self):
return True
def _prec_type(self):
return 'floating-point'
def _test_distributivity(self, **options):
tester = self._tester(**options)
S = tester.some_elements()
from sage.misc.misc import some_tupl... |
def test_predict_proba_raises():
    """Yield one (check, estimator) case per classifier — old nose-style
    generator test for predict_proba raising on unsupported configs."""
    for estimator in (FactorizationMachineClassifier, PolynomialNetworkClassifier):
        yield check_predict_proba_raises, estimator
class CreateWikiTrainingData(PipelineJob):
def __init__(self, preprocess_jobs: Dict[(str, PipelineJob)], opts):
super().__init__(requires=['data/indexes/redirects_en.ttl.bz2.dict', f'data/versions/{opts.data_version_name}/indexes/keyword_processor.pickle', f'data/versions/{opts.data_version_name}/indexes/po... |
def load_examples_agn(path, args):
topics = [' world', ' sports', ' business', ' science']
label_path = './task_data/agn/label_names_kb.txt'
label2synonym = load_label(label_path)
prompt = ' topic:'
icl_str = ''
if (args.k_shot > 0):
train_examples = []
train_path = path.replace(... |
# NOTE(review): the decorator name on the next line was lost in extraction —
# it reads '(frozen=True)' and is presumably '@dataclass(frozen=True)';
# confirm against the original file before relying on this block.
(frozen=True)
class TokenizationToken():
    # The token's value — a string or an integer id (per the annotation).
    value: Union[(str, int)]
    # Optional character range this token covers in the source text;
    # presumably set by the tokenizer — TODO confirm against callers.
    text_range: Optional[TextRange] = None
# NOTE(review): the decorator name was lost in extraction — the line below
# ends in '_class(removal_version=..., future_warn=True)', which looks like a
# deprecation decorator; confirm against the original file.
_class(removal_version='0.19.0', future_warn=True)
class SmoothDeriv2(SmoothnessSecondOrder):
    # Empty subclass of SmoothnessSecondOrder; per the decorator arguments it
    # appears to be a deprecated alias scheduled for removal in 0.19.0.
    pass
def get_pytorch_tpc() -> tp.TargetPlatformCapabilities:
    """Return TFLite-flavored target-platform capabilities for PyTorch,
    built from the project's TFLite target-platform model."""
    return generate_pytorch_tpc(name='tflite_torch', tp_model=get_tp_model())
def load_i3d_pretrained(device=torch.device('cpu')):
from evals.fvd.pytorch_i3d import InceptionI3d
i3d = InceptionI3d(400, in_channels=3).to(device)
filepath = download(_I3D_PRETRAINED_ID, 'i3d_pretrained_400.pt')
i3d.load_state_dict(torch.load(filepath, map_location=device))
i3d.eval()
return ... |
def get_tokenized_phrases(in_directory):
    """Load phrases from in_directory, PTB-tokenize them, report the count,
    and return the tokenized phrases."""
    raw = get_phrases_from_directory(in_directory)
    tokenized = process_utils.get_ptb_tokenized_phrases(raw)
    print('Found {} phrases in arguana'.format(len(tokenized)))
    return tokenized
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.