code stringlengths 101 5.91M |
|---|
class ImageEncoder(nn.Module):
    """Thin wrapper mapping an RGB image batch to ResNet-34 features."""

    def __init__(self):
        super(ImageEncoder, self).__init__()
        # Backbone is trained from scratch (no pretrained weights).
        self.backbone = resnet34(in_channels=3, pretrained=False, progress=True)

    def forward(self, x):
        # Delegate entirely to the backbone.
        return self.backbone(x)
def get_loading_backend():
    """Pick the first available audio-loading backend.

    Preference order: torchaudio, then soundfile.  Returns ``None`` when
    neither backend is installed (preserving the original fall-through).
    """
    if _torchaudio_available():
        return torchaudio_loader
    if _sndfile_available():
        return soundfile_loader
    return None
def create_RealField(prec=53, type='MPFR', rnd='RNDN', sci_not=0):
    """Construct a real-number parent of the requested flavor.

    ``type`` (name kept for caller compatibility although it shadows the
    builtin) selects the implementation; imports are deferred so only the
    chosen backend is loaded.  Any unrecognized value falls through to the
    arbitrary-precision MPFR field, which honors the rounding mode ``rnd``.
    """
    if type == 'RDF':
        from .real_double import RDF
        return RDF
    if type == 'Interval':
        from .real_mpfi import RealIntervalField
        return RealIntervalField(prec, sci_not)
    if type == 'Ball':
        from .real_arb import RealBallField
        return RealBallField(prec)
    if type == 'RLF':
        from .real_lazy import RLF
        return RLF
    from .real_mpfr import RealField
    return RealField(prec, sci_not, rnd)
def find_lcmtypes_dirs(root_path, excluded_paths=None):
    """Yield paths (relative to *root_path*) of directories named ``lcmtypes``.

    Directories listed in *excluded_paths* (as relative paths) are skipped
    and not descended into; each exclusion is consumed on first match.
    Matched ``lcmtypes`` directories are not descended into either.
    """
    pending_exclusions = set(excluded_paths or [])
    for dirpath, dirnames, _ in os.walk(root_path, topdown=True, followlinks=False):
        rel_dir = os.path.relpath(dirpath, root_path)
        if rel_dir in pending_exclusions:
            pending_exclusions.discard(rel_dir)
            del dirnames[:]  # prune: do not walk into excluded trees
            continue
        if os.path.basename(dirpath) == 'lcmtypes':
            yield rel_dir
            del dirnames[:]  # prune below a matched directory
class ExplorationStrategy:
    """Interface for action-selection strategies used during exploration.

    Subclasses must implement :meth:`get_action` and :meth:`get_actions`;
    :meth:`reset` is an optional hook for stateful strategies.
    """

    def get_action(self, t, observation, policy, **kwargs):
        """Return an action for a single observation at timestep *t*."""
        raise NotImplementedError

    def get_actions(self, t, observations, policy, **kwargs):
        """Return actions for a batch of observations at timestep *t*."""
        raise NotImplementedError

    def reset(self):
        """Clear any internal state; no-op by default."""
        pass
def test_train(train_data_fx, model_fx):
h = model_fx.train(train_data_fx[0], train_data_fx[1], epochs=10) |
def _iter_optplan_fields(model: models.Model, visited: Set[int], process_field: Callable[([models.Model, Union[(str, optplan.ProblemGraphNode)]], None)], pass_field_info: bool=False) -> None:
    """Recursively visit a schematics model and rewrite its optplan reference fields.

    Walks ``model`` depth-first (including models nested in list/dict
    fields), calling ``process_field(parent, child[, field_type])`` on every
    field whose declared type is an ``optplan.ReferenceType`` and storing the
    callback's return value back into the field (``None`` keeps the child).

    Args:
        model: Root model to traverse; non-models are ignored.
        visited: Set of ``id()``s of already-processed models (cycle guard).
        process_field: Callback applied to each reference field.
        pass_field_info: If True, the callback also receives the field type.
    """
    if (not isinstance(model, models.Model)):
        return
    # Guard against revisiting shared/recursive sub-models.
    if (id(model) in visited):
        return
    visited.add(id(model))
    def process_field_wrapped(parent: models.Model, child: Union[(str, optplan.ProblemGraphNode)], field_type: optplan.ReferenceType) -> optplan.ProblemGraphNode:
        # Normalize the callback: a None return means "keep the child as-is".
        if pass_field_info:
            return_val = process_field(parent, child, field_type)
        else:
            return_val = process_field(parent, child)
        if (return_val is None):
            return child
        return return_val
    # Schematics may warn while reading partially-validated fields; ignore.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        for (field_name, field_type) in model.fields.items():
            if (model[field_name] is None):
                continue
            # First recurse into nested models (directly or via containers)...
            if isinstance(model[field_name], models.Model):
                _iter_optplan_fields(model[field_name], visited, process_field, pass_field_info=pass_field_info)
            elif isinstance(field_type, types.ListType):
                for item in model[field_name]:
                    _iter_optplan_fields(item, visited, process_field, pass_field_info=pass_field_info)
            elif isinstance(field_type, types.DictType):
                for (_, value) in model[field_name].items():
                    _iter_optplan_fields(value, visited, process_field, pass_field_info=pass_field_info)
            # ...then rewrite this field if it is (or contains) references.
            if isinstance(field_type, optplan.ReferenceType):
                model[field_name] = process_field_wrapped(model, model[field_name], field_type)
            elif (isinstance(field_type, types.ListType) and isinstance(field_type.field, optplan.ReferenceType)):
                model[field_name] = [process_field_wrapped(model, m, field_type.field) for m in model[field_name]]
            elif (isinstance(field_type, types.DictType) and isinstance(field_type.field, optplan.ReferenceType)):
                model[field_name] = {key: process_field_wrapped(model, m, field_type.field) for (key, m) in model[field_name].items()}
def schur(ambient_dim=None, lattice=None):
    """Return the Schur cone generated by the difference vectors e_i - e_{i+1}.

    The generator matrix has ``ambient_dim - 1`` rows (none when the
    dimension is 0 or 1), each with +1 on the diagonal and -1 immediately
    to its right.
    """
    from sage.geometry.cone import Cone
    from sage.matrix.constructor import matrix
    from sage.rings.integer_ring import ZZ
    ambient_dim, lattice = _preprocess_args(ambient_dim, lattice)

    def _entry(i, j):
        # Row i encodes e_i - e_{i+1}.
        if i == j:
            return 1
        if j - i == 1:
            return -1
        return 0

    generators = matrix(ZZ, max(0, ambient_dim - 1), ambient_dim, _entry)
    return Cone(generators.rows(), lattice)
def parallel_self_attention(model_parallel_size, num_att_heads_per_partition, hidden_size_per_att_head, dropout_prob, batch_size, sequence_length):
    """Run one forward/backward pass through a model-parallel BERT self-attention layer.

    Sets up the model-parallel group, builds an identity input layer and a
    ``BertParallelSelfAttention`` on GPU, computes a weighted-sum loss and
    backpropagates, then tears the parallel state down.

    Returns:
        Tuple of (rank, hidden_size, model_parallel_size, loss,
        attention_layer, identity_layer) for the caller's assertions.
    """
    mpu.initialize_model_parallel(model_parallel_size)
    # The actual world size may be smaller than requested.
    model_parallel_size = mpu.get_model_parallel_world_size()
    # Fixed seed so every rank builds identical weights/inputs.
    seed = 12345
    set_random_seed(seed)
    # Global head count scales with the total number of ranks.
    num_att_heads = (num_att_heads_per_partition * torch.distributed.get_world_size())
    hidden_size = (hidden_size_per_att_head * num_att_heads)
    identity_layer = IdentityLayer3D(batch_size, sequence_length, hidden_size).cuda()
    attention_layer = mpu.BertParallelSelfAttention(hidden_size, num_att_heads, dropout_prob).cuda()
    loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).cuda()
    attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).cuda()
    input_ = identity_layer()
    output = attention_layer(input_, attention_mask)
    # Scalar loss: weighted sum of the attention output.
    loss = torch.mul(output, loss_weight).sum()
    loss.backward()
    rank = mpu.get_model_parallel_rank()
    mpu.destroy_model_parallel()
    return (rank, hidden_size, model_parallel_size, loss, attention_layer, identity_layer)
def _impl(arrays, axis, nested, parameters, with_name, highlevel, behavior, attrs):
    """Implementation of an arg-cartesian: cartesian product of local indices.

    Each input array is replaced by its local index along ``axis`` so the
    resulting cartesian product yields integer indices instead of values.

    Fix: the list branch previously called ``ak.local_index(x)`` without the
    axis, silently indexing axis 0 regardless of the requested axis; it now
    forwards ``axis`` like the Mapping branch does.

    NOTE(review): ``attrs`` is accepted but never forwarded — confirm whether
    ``ak.operations.cartesian`` should receive it.
    """
    axis = regularize_axis(axis)
    if isinstance(arrays, Mapping):
        index_arrays = {n: ak.local_index(x, axis) for n, x in arrays.items()}
    else:
        index_arrays = [ak.local_index(x, axis) for x in arrays]
    if with_name is not None:
        # Copy (or create) the parameters dict so the caller's is not mutated.
        parameters = {} if parameters is None else dict(parameters)
        parameters['__record__'] = with_name
    return ak.operations.cartesian(index_arrays, axis=axis, nested=nested, parameters=parameters, highlevel=highlevel, behavior=behavior)
class TorchFixedNormalizer(FixedNormalizer):
    """Torch counterpart of ``FixedNormalizer``: (de)normalizes tensors using
    fixed (non-learned) mean/std statistics."""

    def _mean_std(self, v):
        # Lift the fixed numpy statistics into non-trainable torch variables,
        # adding a broadcast batch dimension for 2-D inputs.
        mean = ptu.np_to_var(self.mean, requires_grad=False)
        std = ptu.np_to_var(self.std, requires_grad=False)
        if v.dim() == 2:
            mean = mean.unsqueeze(0)
            std = std.unsqueeze(0)
        return mean, std

    def _std(self, v):
        # Std only, with the same batch-dimension handling.
        std = ptu.np_to_var(self.std, requires_grad=False)
        return std.unsqueeze(0) if v.dim() == 2 else std

    def normalize(self, v, clip_range=None):
        """Standardize *v* and clamp the result to ±clip_range."""
        if clip_range is None:
            clip_range = self.default_clip_range
        mean, std = self._mean_std(v)
        return torch.clamp((v - mean) / std, -clip_range, clip_range)

    def normalize_scale(self, v):
        """Scale-only normalization (no mean subtraction)."""
        return v / self._std(v)

    def denormalize(self, v):
        """Invert :meth:`normalize` (without the clamp)."""
        mean, std = self._mean_std(v)
        return mean + v * std

    def denormalize_scale(self, v):
        """Invert :meth:`normalize_scale`."""
        return v * self._std(v)
def ud_scores(gold_conllu_file, system_conllu_file):
    """Run the official UD evaluation on a gold/system CoNLL-U file pair.

    Returns the evaluation object produced by ``ud_eval.evaluate``.
    """
    gold = ud_eval.load_conllu_file(gold_conllu_file)
    predicted = ud_eval.load_conllu_file(system_conllu_file)
    return ud_eval.evaluate(gold, predicted)
def initialize_latent_search(agent, latent_search_policy, max_search_steps=10):
    """Seed the agent's graph search with the policy's replay-buffer states."""
    rb_states = latent_search_policy.rb_vec
    agent.initialize_search(rb_states, max_search_steps=max_search_steps)
class RandomForestForecaster(SKLearnForecaster):
    """Forecaster backed by scikit-learn's ``RandomForestRegressor``."""

    # Configuration schema used to validate/construct this forecaster.
    config_class = RandomForestForecasterConfig

    def __init__(self, config: RandomForestForecasterConfig):
        super().__init__(config)
        # Hyper-parameters come straight from the validated config.
        self.model = RandomForestRegressor(
            n_estimators=self.config.n_estimators,
            max_depth=self.config.max_depth,
            min_samples_split=self.config.min_samples_split,
            random_state=self.config.random_state,
        )
def to_string(instring, tokensStart, retTokens):
    """Parse action: convert a SQL-style single-quoted token to a literal dict.

    ``instring`` and ``tokensStart`` are unused but required by the
    parse-action signature.  Doubled single quotes (SQL escaping) are
    rewritten as backslash escapes so the token becomes a valid Python
    string literal, which is then evaluated safely.
    """
    token = retTokens[0]
    inner = token[1:-1].replace("''", "\\'")
    return {'literal': ast.literal_eval("'" + inner + "'")}
_model
def SReT_LT_wo_slice_distill(pretrained=False, **kwargs):
    """Build the SReT-LT (no-slice) distilled recursive transformer.

    When *pretrained* is True, loads weights from
    ``SReT_LT_wo_slice_distill.pth`` in the working directory (CPU map).
    """
    model = DistilledRecursiveTransformer(
        image_size=224,
        patch_size=16,
        stride=8,
        base_dims=[32, 32, 32],
        depth=[4, 10, 6],
        recursive_num=[2, 5, 3],
        heads=[2, 4, 8],
        mlp_ratio=4,
        np_mlp_ratio=1,
        **kwargs,
    )
    if pretrained:
        checkpoint = torch.load('SReT_LT_wo_slice_distill.pth', map_location='cpu')
        model.load_state_dict(checkpoint['model'])
    return model
def assert_incompatible_shapes_raise(input_shapes):
    """Check that broadcasting zero arrays of the given shapes raises ``ValueError``."""
    arrays = [np.zeros(shape) for shape in input_shapes]
    assert_raises(ValueError, broadcast_arrays, *arrays)
def print_memory_stats(message=''):
    """Print process and system memory statistics (currently disabled).

    NOTE(review): the bare ``return`` below short-circuits the whole
    function, so everything after it is dead code — presumably a deliberate
    kill-switch to silence reporting; delete the return to re-enable.
    """
    return
    import psutil
    # System-wide memory figures.
    global_info = psutil.virtual_memory()
    (total, available, used, free) = (global_info.total, global_info.available, global_info.used, global_info.free)
    # This process's memory figures.
    info = psutil.Process().memory_info()
    (rss, vms, shared) = (info.rss, info.vms, info.shared)
    # USS (unique set size) requires the more expensive full_info call.
    uss = psutil.Process().memory_full_info().uss
    gib = (1024 ** 3)
    # All values reported in GiB; newlines collapsed to tabs for one-line output.
    summary = f'''
"[PID: {os.getpid()}]
[{message}]
Available: {(available / gib):,.1f} / {(total / gib):,.1f}
Free: {(free / gib):,.1f} / {(total / gib):,.1f}
Usage: {(used / gib):,.1f} / {(total / gib):,.1f}
RSS: {(rss / gib):,.1f}
VMS: {(vms / gib):,.1f}
USS: {(uss / gib):,.1f}
SHARED: {(shared / gib):,.1f}
'''.strip().replace('\n', '\t')
    print_message(summary, pad=True)
class MiniProduction(object):
    """Lightweight grammar-production record used by generated parser tables.

    Parameter names (including ``str`` and ``len``, which shadow builtins)
    are kept verbatim for compatibility with existing callers.
    """

    def __init__(self, str, name, len, func, file, line):
        self.str = str          # textual form of the production
        self.name = name        # nonterminal name
        self.len = len          # number of right-hand-side symbols
        self.func = func        # name of the semantic-action function
        self.file = file        # source file of the action function
        self.line = line        # source line of the action function
        self.callable = None    # resolved action; filled in by bind()

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'MiniProduction(%s)' % self.str

    def bind(self, pdict):
        """Resolve ``self.func`` to a callable via the given name dictionary."""
        if self.func:
            self.callable = pdict[self.func]
class PourFromCupToCup(Task):
    """RLBench-style task: pour small "liquid" spheres from one cup into another."""

    def init_task(self) -> None:
        """Resolve all scene objects once per task load."""
        self.drops = []  # dynamically created liquid spheres (per episode)
        self.cup_target_base = Dummy('cup_target_base')
        self.cup_source = Shape('cup_source')
        self.cup_target = Shape('cup_target')
        self.cup_source_visual = Shape('cup_source_visual')
        self.cup_target_visual = Shape('cup_target_visual')
        self.distractors = [Shape(('cup_distractor%d' % i)) for i in range(3)]
        self.distractors_vis = [Shape(('cup_distractor_visual%d' % i)) for i in range(3)]
        self.success_detector = ProximitySensor('success')
        # Only the source cup is grasped by the robot.
        self.register_graspable_objects([self.cup_source])

    def init_episode(self, index: int) -> List[str]:
        """Color the cups, scatter objects, spawn liquid, and return the instructions."""
        # Source color = variation index; target = the next color (wrapping).
        target_index = ((index + 1) % self.variation_count())
        (source_name, source_rgb) = colors[index]
        (target_name, target_rgb) = colors[target_index]
        self.cup_source_visual.set_color(source_rgb)
        self.cup_target_visual.set_color(target_rgb)
        # Distractor colors: anything except the source and target colors.
        options = (list(range(index)) + list(range((index + 1), len(colors))))
        options.remove(target_index)
        color_choices = np.random.choice(options, size=3, replace=False)
        for (obj, color_index) in zip(self.distractors_vis, color_choices):
            (_, rgb) = colors[color_index]
            obj.set_color(rgb)
        # Scatter the cups and distractors inside the boundary, kept apart.
        b = SpawnBoundary([Shape('boundary')])
        b.sample(self.cup_source, min_distance=0.12)
        b.sample(self.cup_target, min_distance=0.12)
        [b.sample(d, min_distance=0.12) for d in self.distractors]
        self.cup_target_base.set_orientation(([0.0] * 3))
        # Create fresh liquid spheres parented to (and jittered inside) the source cup.
        self.drops = []
        conditions = []
        for i in range(LIQUID_BALLS):
            drop = Shape.create(PrimitiveShape.SPHERE, mass=0.0001, size=[0.005, 0.005, 0.005])
            drop.set_parent(self.cup_source)
            drop.set_color([0.1, 0.1, 0.9])
            drop.set_position(list(np.random.normal(0, 0.0005, size=(3,))), relative_to=self.cup_source)
            self.drops.append(drop)
            # Success requires EVERY drop to reach the target's sensor.
            conditions.append(DetectedCondition(drop, self.success_detector))
        self.register_success_conditions([ConditionSet(conditions)])
        return [('pour liquid from the %s cup to the %s cup' % (source_name, target_name)), ('pour liquid from the %s mug to the %s mug' % (source_name, target_name)), ('pour the contents of the %s mug into the %s one' % (source_name, target_name)), ('pick up the %s cup and pour the liquid into the %s one' % (source_name, target_name))]

    def variation_count(self) -> int:
        """One variation per available color."""
        return len(colors)

    def base_rotation_bounds(self) -> Tuple[(List[float], List[float])]:
        """Keep the task base unrotated across episodes."""
        return ([0, 0, 0], [0, 0, 0])

    def cleanup(self) -> None:
        """Remove the dynamically created drops between episodes."""
        for d in self.drops:
            d.remove()
        self.drops.clear()
def process(queryPack, response):
    """Format queries and their responses into a tab/space separated report.

    Each output line is ``<query>\\t<r1> <r2> ... \\n`` (note the trailing
    space after every response item, as in the original).  ``response[i]``
    holds the answers for ``queryPack[i]``; an ``IndexError`` still
    propagates when responses are missing.

    Improvement: replaces the ``range(len(...))`` loops and quadratic
    string ``+=`` accumulation with ``enumerate`` and ``str.join``.
    """
    lines = []
    for i, query in enumerate(queryPack):
        answers = ''.join('{} '.format(item) for item in response[i])
        lines.append('{}\t{}\n'.format(query, answers))
    return ''.join(lines)
def save_config_to_file(config, config_file):
    """Serialize *config* as tab-indented JSON to the path *config_file*.

    Returns ``None``.  (The original ``return json.dump(...)`` was
    misleading: ``json.dump`` always returns ``None``, so the return added
    nothing; the file is still closed by the ``with`` block.)
    """
    with open(config_file, 'w') as fp:
        json.dump(config, fp, indent='\t')
def report_coverage() -> None:
    """Collect per-test coverage JSON reports and optionally summarize them."""
    started = time.time()
    options, test_list, interested_folders = initialization()
    get_json_report(test_list, options)
    if options.need_summary:
        # Summarize for the open-source platform across all coverage types.
        summarize_jsons(test_list, interested_folders, [''], TestPlatform.OSS)
    # Report total wall-clock duration of the run.
    print_time('Program Total Time: ', started)
class StreamingSupport(object):
    """Mixin declaring whether a model supports incremental (streaming) updates.

    The base implementation advertises no streaming support; models that do
    stream must override all three methods.
    """

    def supports_streaming(self):
        """Whether this model can be updated incrementally from a stream."""
        return False

    def add_samples(self, X, current=True):
        """Buffer new samples from the stream (must be overridden)."""
        raise NotImplementedError('add_samples() has not been implemented.')

    def update_model_from_stream_buffer(self):
        """Update the model from buffered samples (must be overridden)."""
        raise NotImplementedError('update_model_from_stream_buffer() has not been implemented.')
def to_bio2(tags):
    """Convert IOB1 tags to BIO2: an ``I-`` tag that opens a span becomes ``B-``.

    Tags in ``EMPTY_OR_O_TAG`` pass through unchanged, as do ``B-`` tags
    and ``I-`` tags that continue a span of the same entity type.
    """
    converted = []
    for i, tag in enumerate(tags):
        if tag in EMPTY_OR_O_TAG or tag[0] != 'I':
            converted.append(tag)
            continue
        previous = tags[i - 1] if i > 0 else None
        # An I- tag opens a new span when it is first, follows 'O', or
        # follows a tag with a different entity suffix.
        starts_span = previous is None or previous == 'O' or previous[1:] != tag[1:]
        converted.append('B' + tag[1:] if starts_span else tag)
    return converted
class RandomSearchMutGaussian(Evolution):
    """Random-search evolution: random selection or Gaussian initialization,
    followed by Gaussian mutation of every individual."""

    # Probability of selecting an existing individual vs. initializing a new one.
    sel_pb: float
    init_pb: float
    # Per-gene Gaussian mutation probability.
    mut_pb: float
    # Mean and standard deviation used for both initialization and mutation.
    mu: float
    sigma: float

    def __init__(self, container: Container, budget: int, dimension: int, sel_pb: float=0.5, init_pb: float=0.5, mut_pb: float=0.2, mu: float=0.0, sigma: float=1.0, **kwargs):
        self.sel_pb = sel_pb
        self.init_pb = init_pb
        self.mut_pb = mut_pb
        self.mu = mu
        self.sigma = sigma
        def init_fn(base_ind):
            # New individuals: i.i.d. normal genes.
            # NOTE(review): reads self.dimension, which is presumably set by
            # Evolution.__init__ (called below) — confirm.
            return [random.normalvariate(self.mu, self.sigma) for _ in range(self.dimension)]
        select_or_initialise = partial(tools.sel_or_init, sel_fn=tools.sel_random, sel_pb=sel_pb, init_fn=init_fn, init_pb=init_pb)
        def vary(ind):
            # Variation: Gaussian mutation with the configured rate/parameters.
            return tools.mut_gaussian(ind, mu=self.mu, sigma=self.sigma, mut_pb=self.mut_pb)
        super().__init__(container, budget, dimension=dimension, select_or_initialise=select_or_initialise, vary=vary, **kwargs)
def create_tmp_tables_guard(selects, datasource):
    """Generator/context-manager: create temp table(s) from SELECT(s), drop on exit.

    Accepts one SELECT string or a list/tuple of them; yields the single
    created table name or the list of names respectively.  Tables are
    always dropped, even if the caller's block raises.
    """
    if isinstance(selects, six.string_types):
        created = create_tmp_table_from_select(selects, datasource)
        tables, to_drop = created, [created]
    elif isinstance(selects, (list, tuple)):
        created = [create_tmp_table_from_select(stmt, datasource) for stmt in selects]
        tables, to_drop = created, created
    else:
        raise ValueError('not supported types {}'.format(type(selects)))
    try:
        yield tables
    finally:
        drop_tables(to_drop, datasource)
def enqueue(net, queue, data_blobs, status=None):
    """Add a ``SafeEnqueueBlobs`` op to *net* pushing *data_blobs* onto *queue*.

    Duplicate blobs are copied first so every enqueue slot is a distinct
    blob.  Returns the status blob (last op output), which callers can use
    to detect queue closure.
    """
    if (status is None):
        status = net.NextName('status')
    queue_blobs = []
    for blob in data_blobs:
        if (blob not in queue_blobs):
            queue_blobs.append(blob)
        else:
            # Duplicate input: enqueue a copy so each output slot is unique.
            logger.warning('Need to copy blob {} to enqueue'.format(blob))
            queue_blobs.append(net.Copy(blob))
    # Outputs mirror the inputs plus the trailing status blob.
    results = net.SafeEnqueueBlobs(([queue] + queue_blobs), (queue_blobs + [status]))
    return results[(- 1)]
class LrUpdaterHook(Hook):
    """Hook that schedules learning rates, with optional warmup.

    Subclasses implement :meth:`get_lr`; this base class handles warmup
    ('constant', 'linear', or 'exp') and writing rates into the optimizer's
    param groups, either per epoch (``by_epoch=True``) or per iteration.
    """

    def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.1, warmup_by_epoch=False, **kwargs):
        if (warmup is not None):
            if (warmup not in ['constant', 'linear', 'exp']):
                raise ValueError('"{}" is not a supported type for warming up, valid types are "constant" and "linear"'.format(warmup))
        if (warmup is not None):
            assert (warmup_iters > 0), '"warmup_iters" must be a positive integer'
            assert (0 < warmup_ratio <= 1.0), '"warmup_ratio" must be in range (0,1]'
        self.by_epoch = by_epoch  # schedule per epoch (True) or per iteration
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.warmup_by_epoch = warmup_by_epoch
        if self.warmup_by_epoch:
            # warmup_iters was given in epochs; it is converted to iterations
            # in before_train_epoch once the epoch length is known.
            self.warmup_epochs = self.warmup_iters
            self.warmup_iters = None
        else:
            self.warmup_epochs = None
        self.base_lr = []     # initial lr of each param group
        self.regular_lr = []  # scheduled lr (before warmup scaling)

    def _set_lr(self, runner, lr_groups):
        # Write one lr per optimizer param group.
        for (param_group, lr) in zip(runner.optimizer.param_groups, lr_groups):
            param_group['lr'] = lr

    def get_lr(self, runner, base_lr):
        """Compute the scheduled lr for one group; implemented by subclasses."""
        raise NotImplementedError

    def get_regular_lr(self, runner):
        """Return the scheduled lr for every param group."""
        return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]

    def get_warmup_lr(self, cur_iters):
        """Return warmup-scaled lrs at iteration *cur_iters*, ramping toward regular_lr."""
        if (self.warmup == 'constant'):
            warmup_lr = [(_lr * self.warmup_ratio) for _lr in self.regular_lr]
        elif (self.warmup == 'linear'):
            # Linear interpolation from warmup_ratio*lr up to lr.
            k = ((1 - (cur_iters / self.warmup_iters)) * (1 - self.warmup_ratio))
            warmup_lr = [(_lr * (1 - k)) for _lr in self.regular_lr]
        elif (self.warmup == 'exp'):
            # Exponential ramp from warmup_ratio*lr up to lr.
            k = (self.warmup_ratio ** (1 - (cur_iters / self.warmup_iters)))
            warmup_lr = [(_lr * k) for _lr in self.regular_lr]
        return warmup_lr

    def before_run(self, runner):
        # Snapshot initial lrs so resumed runs rescale from the same base.
        for group in runner.optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])
        self.base_lr = [group['initial_lr'] for group in runner.optimizer.param_groups]

    def before_train_epoch(self, runner):
        if (not self.by_epoch):
            return
        if self.warmup_by_epoch:
            # Convert warmup epochs to iterations using this epoch's length.
            epoch_len = len(runner.data_loader)
            self.warmup_iters = (self.warmup_epochs * epoch_len)
        self.regular_lr = self.get_regular_lr(runner)
        self._set_lr(runner, self.regular_lr)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if (not self.by_epoch):
            # Iteration-wise schedule: recompute every iteration.
            self.regular_lr = self.get_regular_lr(runner)
            if ((self.warmup is None) or (cur_iter >= self.warmup_iters)):
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
        elif self.by_epoch:
            # Epoch-wise schedule: only the warmup phase is handled here.
            if ((self.warmup is None) or (cur_iter > self.warmup_iters)):
                return
            elif (cur_iter == self.warmup_iters):
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
# Color name -> channel triple, apparently in OpenCV BGR order
# (e.g. 'red' -> (0, 0, 255)); hoisted so it is built once, not per call.
_COLOR_SCALARS = {
    'purple': (255, 0, 255),
    'yellow': (0, 255, 255),
    'blue': (255, 0, 0),
    'green': (0, 255, 0),
    'red': (0, 0, 255),
    'skyblue': (235, 206, 135),
    'navyblue': (128, 0, 0),
    'azure': (255, 255, 240),
    'slate': (255, 0, 127),
    'chocolate': (30, 105, 210),
    'olive': (112, 255, 202),
    'orange': (0, 140, 255),
    'orchid': (255, 102, 224),
}


def find_color_scalar(color_string):
    """Return the channel triple for *color_string*.

    Raises:
        KeyError: for unknown color names (now with the list of valid names).
    """
    try:
        return _COLOR_SCALARS[color_string]
    except KeyError:
        raise KeyError('Unknown color {!r}; expected one of {}'.format(
            color_string, sorted(_COLOR_SCALARS)))
class APrioriMeshTester():
    """Pre-checks a mesh deformation by bounding the local volume change.

    Projects det(I + grad(transformation)) onto a DG0 (cell-constant) space
    and verifies every cell's determinant lies within
    ``[1 / volume_change, volume_change]``.
    """

    def __init__(self, mesh: fenics.Mesh):
        self.mesh = mesh
        # Piecewise-constant space: one determinant value per cell.
        dg_function_space = fenics.FunctionSpace(self.mesh, 'DG', 0)
        vector_cg_space = fenics.VectorFunctionSpace(self.mesh, 'CG', 1)
        dx = fenics.Measure('dx', domain=mesh)
        # Holds the candidate transformation during test().
        self.transformation_container = fenics.Function(vector_cg_space)
        # Mass matrix (lhs) and determinant load (rhs) of the L2 projection.
        self.A_prior = ((fenics.TrialFunction(dg_function_space) * fenics.TestFunction(dg_function_space)) * dx)
        self.l_prior = ((fenics.det((fenics.Identity(self.mesh.geometric_dimension()) + fenics.grad(self.transformation_container))) * fenics.TestFunction(dg_function_space)) * dx)
        # Jacobi-preconditioned direct application: the DG0 mass matrix is diagonal.
        self.options_prior: _typing.KspOption = {'ksp_type': 'preonly', 'pc_type': 'jacobi', 'pc_jacobi_type': 'diagonal', 'ksp_rtol': 1e-16, 'ksp_atol': 1e-20, 'ksp_max_it': 1000}

    def test(self, transformation: fenics.Function, volume_change: float) -> bool:
        """Return True when the cellwise volume change of *transformation* stays within bounds."""
        comm = self.transformation_container.function_space().mesh().mpi_comm()
        # aypx(0, v) copies v into the container's PETSc vector.
        self.transformation_container.vector().vec().aypx(0.0, transformation.vector().vec())
        self.transformation_container.vector().apply('')
        x = _utils.assemble_and_solve_linear(self.A_prior, self.l_prior, ksp_options=self.options_prior, comm=comm)
        # PETSc min()/max() return (index, value) pairs; take the value.
        min_det = float(x.min()[1])
        max_det = float(x.max()[1])
        return bool(((min_det >= (1 / volume_change)) and (max_det <= volume_change)))
def get_instruct_adapter_spec(num_outputs: int=1, max_tokens: int=512, temperature: float=0.7) -> AdapterSpec:
    """Adapter spec for zero-shot instruction following (no in-context examples)."""
    return AdapterSpec(
        method=ADAPT_GENERATION,
        instructions='',
        input_prefix='',
        input_suffix='\n',
        output_prefix='',
        output_suffix='',
        max_train_instances=0,  # zero-shot
        num_outputs=num_outputs,
        max_tokens=max_tokens,
        temperature=temperature,
        stop_sequences=[],
    )
def test_metric_evaluate_y_pred_zeros():
    """All-zero predictions should score 0.0 under every metric.

    NOTE(review): ``k`` and ``y_true`` appear to be module-level fixtures
    defined elsewhere in this test module — confirm.
    """
    metrics = create_metric_list(k, np.ones(3))
    zero_predictions = torch.from_numpy(np.zeros((2, 3)))
    for metric in metrics:
        assert metric.evaluate(y_true, zero_predictions) == 0.0
def validate_files(file_dict, data_home, verbose):
    """Check local presence and checksums of every entry's files.

    Args:
        file_dict: Mapping of file_id -> per-file metadata; each non-'clips'
            key maps to a ``(relative_path, checksum)`` pair (indexable).
        data_home: Root directory the relative paths are joined against.
        verbose: Show a progress bar when True.

    Returns:
        ``(missing, invalid)``: dicts mapping file_id to lists of local
        paths that are absent, or present but failing checksum validation.

    Improvements: the inner loop variable is no longer misleadingly named
    ``clips``, guard clauses replace the deep nesting, and ``setdefault``
    replaces the ``not in ...keys()`` membership anti-pattern.  Behavior is
    unchanged.
    """
    missing = {}
    invalid = {}
    for file_id, file in tqdm.tqdm(file_dict.items(), disable=not verbose):
        for key in file.keys():
            # The 'clips' entry is metadata, not a (path, checksum) pair.
            if key == 'clips':
                continue
            filepath, checksum = file[key][0], file[key][1]
            if filepath is None:
                continue
            local_path = os.path.join(data_home, filepath)
            exists, valid = validate(local_path, checksum)
            if not exists:
                missing.setdefault(file_id, []).append(local_path)
            elif not valid:
                invalid.setdefault(file_id, []).append(local_path)
    return (missing, invalid)
def split_text(text: str, n=100, character=' ') -> List[str]:
    """Split *text* into chunks of at most *n* tokens (split on *character*).

    Each chunk is re-joined with *character* and stripped of surrounding
    whitespace.  An empty input yields ``['']``, matching ``str.split``.
    """
    tokens = text.split(character)
    chunks = []
    for start in range(0, len(tokens), n):
        chunks.append(character.join(tokens[start:start + n]).strip())
    return chunks
def test_nullable_ref(testdir):
    """End-to-end pytest check: an x-nullable $ref body parameter generates a None case.

    NOTE(review): the leading ``\\(method="POST")\\(max_examples=1)`` in the
    generated test source looks like decorators whose names were stripped
    (e.g. a schema parametrize + hypothesis settings) — the string is kept
    verbatim here because it is runtime content consumed by make_test.
    """
    testdir.make_test('\(method="POST")\(max_examples=1)\ndef test_(request, case):\n request.config.HYPOTHESIS_CASES += 1\n assert case.path == "/users"\n assert case.method == "POST"\n assert case.body is None\n', paths={'/users': {'post': {'parameters': [{'in': 'body', 'name': 'attributes', 'schema': {'$ref': '#/definitions/NullableIntRef'}, 'required': True}], 'responses': {'200': {'description': 'OK'}}}}}, definitions={'NullableIntRef': {'type': 'integer', 'x-nullable': True}})
    result = testdir.runpytest('-v', '-s')
    result.assert_outcomes(passed=1)
    # Hypothesis must have executed exactly one example.
    result.stdout.re_match_lines(['Hypothesis calls: 1$'])
class VermaModuleMorphism(Morphism):
    """Morphism between Verma modules determined by a single base-ring scalar.

    The map sends the domain's highest-weight vector to
    ``scalar * singular_vector`` in the codomain (the zero map when the
    scalar is zero or no singular vector exists).
    """

    def __init__(self, parent, scalar):
        # Base-ring scalar that scales the singular vector.
        self._scalar = scalar
        Morphism.__init__(self, parent)

    def _repr_type(self):
        """Type string used in printed representations."""
        return 'Verma module'

    def _repr_defn(self):
        """Describe the map by the image of the highest-weight vector."""
        v = self.domain().highest_weight_vector()
        if (not self._scalar):
            return '{} |--> {}'.format(v, self.codomain().zero())
        return '{} |--> {}'.format(v, (self._scalar * self.parent().singular_vector()))

    def _richcmp_(self, other, op):
        # Morphisms in the same homset compare by their defining scalar.
        return richcmp(self._scalar, other._scalar, op)

    def _call_(self, x):
        """Apply the morphism to *x*; zero map when scalar is 0 or no singular vector exists."""
        if ((not self._scalar) or (self.parent().singular_vector() is None)):
            return self.codomain().zero()
        mc = x.monomial_coefficients(copy=False)
        return self.codomain().linear_combination(((self._on_basis(m), (self._scalar * c)) for (m, c) in mc.items()))

    def _on_basis(self, m):
        """Image of one basis monomial: the corresponding PBW element acting on the singular vector."""
        pbw = self.codomain()._pbw
        return (pbw.monomial(pbw._indices(m.dict())) * self.parent().singular_vector())

    def _add_(self, other):
        # Morphisms add by adding scalars.
        return type(self)(self.parent(), (self._scalar + other._scalar))

    def _sub_(self, other):
        return type(self)(self.parent(), (self._scalar - other._scalar))

    def _acted_upon_(self, other, self_on_left):
        # Base-ring elements act by scaling; anything else is unsupported (None).
        R = self.parent().base_ring()
        if (other not in R):
            return None
        return type(self)(self.parent(), (R(other) * self._scalar))

    def _composition_(self, right, homset):
        """Compose with *right*; scalars multiply when both sides are Verma morphisms over the same algebra."""
        if (isinstance(right, VermaModuleMorphism) and (right.domain()._g is self.codomain()._g)):
            return homset.element_class(homset, (right._scalar * self._scalar))
        return super()._composition_(right, homset)

    def is_injective(self):
        """Injective iff a singular vector exists and the scalar is nonzero."""
        return ((self.parent().singular_vector() is not None) and bool(self._scalar))

    def is_surjective(self):
        """Surjective only for nonzero endomorphisms."""
        return ((self.domain() == self.codomain()) and bool(self._scalar))
class rdist_gen(rv_continuous):
    """An R-distributed (symmetric beta) continuous random variable on [-1, 1].

    Implemented by mapping x in [-1, 1] to (x + 1) / 2 in [0, 1] and
    delegating to the beta distribution with both shape parameters c / 2.
    """

    def _shape_info(self):
        # Single positive shape parameter c, both endpoints excluded.
        return [_ShapeInfo('c', False, (0, np.inf), (False, False))]

    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # log pdf = beta log-pdf at the mapped point minus log 2 (Jacobian).
        half = c / 2
        return -np.log(2) + beta._logpdf((x + 1) / 2, half, half)

    def _cdf(self, x, c):
        half = c / 2
        return beta._cdf((x + 1) / 2, half, half)

    def _sf(self, x, c):
        half = c / 2
        return beta._sf((x + 1) / 2, half, half)

    def _ppf(self, q, c):
        half = c / 2
        return 2 * beta._ppf(q, half, half) - 1

    def _rvs(self, c, size=None, random_state=None):
        # Sample from beta(c/2, c/2) and map [0, 1] back onto [-1, 1].
        return 2 * random_state.beta(c / 2, c / 2, size) - 1

    def _munp(self, n, c):
        # Odd raw moments vanish by symmetry; even ones are beta-function ratios.
        numerator = (1 - n % 2) * sc.beta((n + 1.0) / 2, c / 2.0)
        return numerator / sc.beta(1.0 / 2, c / 2.0)
_config
def fixed_mdp_rnd_init():
    """Experiment configuration for PPO on the fixed 'scenario2' layout.

    NOTE(review): the locals below are never used in this body — together
    with the stripped ``_config`` marker above, this looks like a
    config-capturing decorator (e.g. sacred's ``@ex.config``) that records
    local variables; confirm before refactoring.
    """
    LOCAL_TESTING = False
    fixed_mdp = True
    layout_name = 'scenario2'
    # Fewer simulation threads when iterating locally.
    sim_threads = (10 if LOCAL_TESTING else 50)
    PPO_RUN_TOT_TIMESTEPS = 24000
    TOTAL_BATCH_SIZE = 8000
    STEPS_PER_UPDATE = 4
    MINIBATCHES = 4
    LR = 0.0005
def interpolate_3D(input, size=None, scale_factor=None, interpolation='trilinear'):
    """Resize a 5-D volumetric tensor with ``align_corners=True``.

    Exactly one of *size* / *scale_factor* must be given (enforced by
    ``F.interpolate``); *input* must be 5-D (batch, channel, 3 spatial dims).
    """
    assert (input.dim() == 5), 'input must be 5D'
    return F.interpolate(input, size=size, scale_factor=scale_factor, mode=interpolation, align_corners=True)
class Function_psi2(GinacFunction):
    """Two-argument polygamma function ``psi(n, x)`` wrapped from GiNaC."""

    def __init__(self):
        # Register the symbol plus conversions to the backend systems.
        GinacFunction.__init__(self, 'psi', nargs=2, latex_name='\\psi', conversions=dict(mathematica='PolyGamma', sympy='polygamma', maple='Psi', giac='Psi', fricas='polygamma'))

    def _maxima_init_evaled_(self, *args):
        """Render the evaluated call in Maxima syntax: ``psi[n](x)``."""
        def _maximify(a):
            # Strings pass through; objects prefer their Maxima form.
            if isinstance(a, str):
                return a
            if hasattr(a, '_maxima_init_'):
                return a._maxima_init_()
            return str(a)
        n, x = [_maximify(a) for a in args]
        return 'psi[%s](%s)' % (n, x)
class TestKerasBaseActivationsQuantizer(BaseKerasTrainableInfrastructureTest):
    """Validates error handling and (de)serialization of activation quantizer configs."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_activation_quantization_config(self):
        # Deliberately UNIFORM: ZeroActivationsQuantizer is expected to reject it.
        return TrainableQuantizerActivationConfig(activation_quantization_method=QuantizationMethod.UNIFORM, activation_n_bits=8, activation_quantization_params={}, enable_activation_quantization=True, min_threshold=0)

    def run_test(self):
        """Exercise rejection paths, acceptance, and config serialization round-trip."""
        # Wrong quantization method must raise with the exact message below.
        with self.unit_test.assertRaises(Exception) as e:
            ZeroActivationsQuantizer(self.get_activation_quantization_config())
        self.unit_test.assertEqual(f'Quantization method mismatch expected: [<QuantizationMethod.POWER_OF_TWO: 0>, <QuantizationMethod.SYMMETRIC: 3>] and got QuantizationMethod.UNIFORM', str(e.exception))
        # A weights config where an activation config is expected must also raise.
        with self.unit_test.assertRaises(Exception) as e:
            ZeroActivationsQuantizer(self.get_weights_quantization_config())
        self.unit_test.assertEqual(f'Expect activation quantization got weight', str(e.exception))
        # The base class's (valid) config is accepted and stored unchanged.
        activation_quantization_config = super(TestKerasBaseActivationsQuantizer, self).get_activation_quantization_config()
        quantizer = ZeroActivationsQuantizer(activation_quantization_config)
        self.unit_test.assertTrue((quantizer.quantization_config == activation_quantization_config))
        # Round-trip the config through serialization and compare field-by-field.
        config_data = config_serialization(activation_quantization_config)
        self.unit_test.assertTrue(config_data['enable_activation_quantization'])
        deserialized_config = config_deserialization(config_data)
        self.unit_test.assertTrue((activation_quantization_config.__dict__ == deserialized_config.__dict__))
def cython_compile(path_pattern, options):
    """Cythonize all .py/.pyx files matching *path_pattern* and optionally build them.

    Builds can run in a multiprocessing pool when ``options.parallel > 1``;
    the pool is terminated on any error and drained cleanly on success.
    """
    pool = None
    all_paths = map(os.path.abspath, extended_iglob(path_pattern))
    try:
        for path in all_paths:
            if options.build_inplace:
                # Walk upward to the first existing non-package directory:
                # that is the root against which in-place paths are computed.
                base_dir = path
                while ((not os.path.isdir(base_dir)) or is_package_dir(base_dir)):
                    base_dir = os.path.dirname(base_dir)
            else:
                base_dir = None
            if os.path.isdir(path):
                # Recursively cythonize every Python/Cython file beneath a directory.
                paths = [os.path.join(path, '**', '*.{py,pyx}')]
            else:
                paths = [path]
            ext_modules = cythonize(paths, nthreads=options.parallel, exclude_failures=options.keep_going, exclude=options.excludes, compiler_directives=options.directives, compile_time_env=options.compile_time_env, force=options.force, quiet=options.quiet, depfile=options.depfile, **options.options)
            if (ext_modules and options.build):
                if ((len(ext_modules) > 1) and (options.parallel > 1)):
                    # Lazily create the pool; fall back to a serial fake pool
                    # on platforms that cannot spawn processes.
                    if (pool is None):
                        try:
                            pool = multiprocessing.Pool(options.parallel)
                        except OSError:
                            pool = _FakePool()
                    pool.map_async(run_distutils, [(base_dir, [ext]) for ext in ext_modules])
                else:
                    run_distutils((base_dir, ext_modules))
    except:
        # Stop any in-flight builds before propagating the error.
        if (pool is not None):
            pool.terminate()
        raise
    else:
        # Success path: wait for all queued builds to finish.
        if (pool is not None):
            pool.close()
            pool.join()
class BiasedMF(RecModel):
    """Matrix factorization with per-user, per-item, and global bias terms."""

    def _init_weights(self):
        # Latent factor tables.
        self.uid_embeddings = torch.nn.Embedding(self.user_num, self.ui_vector_size)
        self.iid_embeddings = torch.nn.Embedding(self.item_num, self.ui_vector_size)
        # Scalar biases per user/item plus one learned global offset.
        self.user_bias = torch.nn.Embedding(self.user_num, 1)
        self.item_bias = torch.nn.Embedding(self.item_num, 1)
        self.global_bias = torch.nn.Parameter(torch.tensor(0.1))
        # Embedding attributes subject to L2 regularization.
        self.l2_embeddings = ['uid_embeddings', 'iid_embeddings', 'user_bias', 'item_bias']

    def predict(self, feed_dict):
        """Score each (user, item) pair as dot(u, i) + user/item/global biases."""
        check_list, embedding_l2 = [], []
        user_ids = feed_dict[UID]
        item_ids = feed_dict[IID]
        user_bias = self.user_bias(user_ids).view([-1])
        item_bias = self.item_bias(item_ids).view([-1])
        embedding_l2.extend([user_bias, item_bias])
        user_vectors = self.uid_embeddings(user_ids)
        item_vectors = self.iid_embeddings(item_ids)
        embedding_l2.extend([user_vectors, item_vectors])
        prediction = (user_vectors * item_vectors).sum(dim=1).view([-1])
        prediction = prediction + user_bias + item_bias + self.global_bias
        check_list.append(('prediction', prediction))
        return {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
def test_edge_bundling():
    """Smoke-test the SIS diffusion visualization with bundled edges."""
    params = {
        'model': 'SIS',
        'b': 0.00208,
        'd': 0.01,
        'c': 1,
        'runs': 10,
        'steps': 500,
        'seed': 1,
        'diffusion': 'max',
        'method': 'add_edge_random',
        'k': 15,
        'edge_style': 'bundled',
        'node_style': 'force_atlas',
        'fa_iter': 200,
        'plot_transition': True,
        'gif_animation': False,
    }
    run_test(params)
_node_type()
class DipoleSource(optplan.EmSource):
    """Schema for a point-dipole electromagnetic source."""

    # Polymorphic discriminator used when (de)serializing the plan.
    type = schema_utils.polymorphic_model_type('source.dipole_source')
    # Dipole location in 3-D space.
    position = optplan.vec3d()
    # Orientation axis index (presumably 0=x, 1=y, 2=z — confirm with consumers).
    axis = types.IntType()
    # Excitation phase.
    phase = types.FloatType()
    # Emitted power.
    power = types.FloatType()
    # Whether to normalize fields by a simulation; off by default.
    normalize_by_sim = types.BooleanType(default=False)
class Baseline(abc.ABC):
    """Abstract interface for a value-function baseline fit on sampled paths.

    Fix: the five method stubs below had no bodies at all in the original
    (a syntax error — most likely ``@abc.abstractmethod`` decorators were
    stripped); they are restored as abstract methods, consistent with the
    class already deriving from ``abc.ABC``.
    """

    def __init__(self, env_spec):
        # Environment specification this baseline is configured for.
        self._mdp_spec = env_spec

    @abc.abstractmethod
    def get_param_values(self):
        """Return the baseline's current parameters."""

    @abc.abstractmethod
    def set_param_values(self, flattened_params):
        """Set the baseline's parameters from a flat vector."""

    @abc.abstractmethod
    def fit(self, paths):
        """Fit the baseline to a batch of sampled paths."""

    @abc.abstractmethod
    def predict(self, path):
        """Predict baseline values for a single path."""

    @abc.abstractmethod
    def log_diagnostics(self, paths):
        """Log diagnostic information about the current fit."""
class BaseWaterRetention(NonLinearModel):
    """Base class for water-retention models; provides curve plotting."""

    def plot(self, ax=None):
        """Plot the water retention curve theta(-psi) on a log x-axis.

        A new figure/axes is created when *ax* is omitted.
        """
        import matplotlib.pyplot as plt
        if ax is None:
            plt.figure()
            ax = plt.subplot(111)
        # Negative potentials (suction) sampled over five decades.
        potentials = -np.logspace(-2, 3, 1000)
        ax.semilogx(-potentials, self(potentials))
        ax.set_title('Water retention curve')
        ax.set_xlabel('Soil water potential, $-\\psi$')
        ax.set_ylabel('Water content, $\\theta$')
def getEncryptionKey(data, key):
    # Encrypt *data* (padded to the block size) with AES-CBC under *key*,
    # using the module-level IV.
    # NOTE(review): despite the name, this returns ciphertext, not a key.
    # Reusing a fixed module-level IV across calls weakens CBC — confirm
    # this matches the protocol being implemented.
    cipher = AES.new(key, AES.MODE_CBC, IV)
    return cipher.encrypt(pad(data, AES.block_size))
def mp_fn(_: int, cfg: PretrainConfig) -> None:
    """Per-process entry point (e.g. for a multiprocessing spawn); the
    process-index argument is intentionally unused.

    Forces the default tensor type to CPU float32 before pretraining.
    """
    torch.set_default_tensor_type('torch.FloatTensor')
    xpretrain(cfg)
def _encode_prompt(args, model, tokenizer, prompt_text):
    """Encode one prompt, applying model-specific preprocessing when needed.

    Returns the encoded prompt tensor already moved to ``args.device``.
    """
    if args.model_type in PREPROCESSING_FUNCTIONS:
        prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type)
        preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text)
        encoded_prompt = tokenizer.encode(preprocessed_prompt_text, add_special_tokens=False, return_tensors='pt', add_space_before_punct_symbol=True)
    else:
        encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors='pt')
    return encoded_prompt.to(args.device)


def _flush_generations(file_name, results):
    """Append whitespace-normalized generation strings to *file_name*, one per line."""
    print('Writing Generations To File ', file_name)
    with open(file_name, 'a') as output_file:
        for result in results:
            output_file.write(' '.join(result.split()) + '\n')
    print('Finished Writing')


def main():
    """Generate text with a pretrained causal LM.

    Single-prompt mode prints and returns the full sequences (prompt +
    completion). Multi-prompt mode reads prompts from a file, joins each
    prompt's completions with ' <CAND_SEP> ', periodically appends them to
    ``--output_gen_file_name``, and returns any not-yet-flushed results.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys())))
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help=('Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(MODEL_CLASSES.keys())))
    parser.add_argument('--prompt', type=str, default='')
    parser.add_argument('--multi_prompt', action='store_true', help='Use multiple prompts')
    parser.add_argument('--multi_prompt_file_name', type=str, default=None, help='File name of prompts, one per line')
    parser.add_argument('--output_gen_file_name', type=str, default=None, help='File name where model completions given the prompts are stored, one per line')
    parser.add_argument('--length', type=int, default=20)
    parser.add_argument('--stop_token', type=str, default=None, help='Token at which text generation is stopped')
    parser.add_argument('--temperature', type=float, default=1.0, help='temperature of 1.0 has no effect, lower tend toward greedy sampling')
    parser.add_argument('--repetition_penalty', type=float, default=1.0, help='primarily useful for CTRL model; in that case, use 1.2')
    parser.add_argument('--k', type=int, default=0)
    parser.add_argument('--p', type=float, default=0.9)
    parser.add_argument('--padding_text', type=str, default='', help='Padding text for Transfo-XL and XLNet.')
    parser.add_argument('--xlm_language', type=str, default='', help='Optional language when used with the XLM model.')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--num_return_sequences', type=int, default=1, help='The number of samples to generate.')
    args = parser.parse_args()
    args.device = torch.device('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu')
    args.n_gpu = torch.cuda.device_count()
    set_seed(args)
    try:
        args.model_type = args.model_type.lower()
        (model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
    except KeyError:
        # Fix: the original message left the {} placeholder unfilled.
        raise KeyError('the model {} you specified is not supported. You are welcome to add it and open a PR :)'.format(args.model_type))
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    model = model_class.from_pretrained(args.model_name_or_path)
    model.to(args.device)
    args.length = adjust_length_to_model(args.length, max_sequence_length=model.config.max_position_embeddings)
    logger.info(args)
    if not args.multi_prompt:
        prompt_text = (args.prompt if args.prompt else input('Model prompt >>> '))
        encoded_prompt = _encode_prompt(args, model, tokenizer, prompt_text)
        output_sequences = model.generate(input_ids=encoded_prompt, max_length=min((args.length + len(encoded_prompt[0])), model.config.max_position_embeddings), temperature=args.temperature, top_k=args.k, top_p=args.p, repetition_penalty=args.repetition_penalty, do_sample=True, num_return_sequences=args.num_return_sequences)
        if len(output_sequences.shape) > 2:
            output_sequences.squeeze_()
        # Length of the decoded prompt is constant; compute it once.
        prompt_decoded_len = len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True))
        generated_sequences = []
        for (generated_sequence_idx, generated_sequence) in enumerate(output_sequences):
            print('=== GENERATED SEQUENCE {} ==='.format(generated_sequence_idx + 1))
            text = tokenizer.decode(generated_sequence.tolist(), clean_up_tokenization_spaces=True)
            # Truncate at the stop token (if any), then drop the echoed prompt.
            text = text[:(text.find(args.stop_token) if args.stop_token else None)]
            total_sequence = prompt_text + text[prompt_decoded_len:]
            generated_sequences.append(total_sequence)
            print(total_sequence)
        return generated_sequences
    prompt_texts = read_prompts_from_file(args.multi_prompt_file_name)
    output_prompt_results = []
    for (i, prompt_text) in enumerate(prompt_texts):
        if (i % 10) == 0:
            print('Completing Input ', i)
        encoded_prompt = _encode_prompt(args, model, tokenizer, prompt_text)
        effective_max_length = min((args.length + len(encoded_prompt[0])), model.config.max_position_embeddings)
        if args.num_return_sequences <= 20:
            output_sequences = model.generate(input_ids=encoded_prompt, max_length=effective_max_length, temperature=args.temperature, top_k=args.k, top_p=args.p, repetition_penalty=args.repetition_penalty, do_sample=True, num_return_sequences=args.num_return_sequences)
        else:
            # Generate in chunks of at most 20 sequences to bound memory use,
            # then concatenate once (the original re-concatenated per chunk).
            seq_frags = [20] * (args.num_return_sequences // 20)
            if (args.num_return_sequences % 20) > 0:
                seq_frags.append(args.num_return_sequences % 20)
            output_sequences_frags = [model.generate(input_ids=encoded_prompt, max_length=effective_max_length, temperature=args.temperature, top_k=args.k, top_p=args.p, repetition_penalty=args.repetition_penalty, do_sample=True, num_return_sequences=seq_frag) for seq_frag in seq_frags]
            output_sequences = torch.cat(output_sequences_frags, dim=0)
        if len(output_sequences.shape) > 2:
            output_sequences.squeeze_()
        prompt_decoded_len = len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True))
        generated_sequences = []
        for generated_sequence in output_sequences:
            text = tokenizer.decode(generated_sequence.tolist(), clean_up_tokenization_spaces=True)
            text = text[:(text.find(args.stop_token) if args.stop_token else None)]
            # Keep only the completion (strip the echoed prompt prefix).
            generated_sequences.append(text[prompt_decoded_len:])
        output_prompt_results.append(' <CAND_SEP> '.join(generated_sequences))
        if (i % 10) == 0:
            # Periodic flush so long runs do not lose completed work.
            _flush_generations(args.output_gen_file_name, output_prompt_results)
            output_prompt_results = []
    if args.output_gen_file_name:
        _flush_generations(args.output_gen_file_name, output_prompt_results)
    return output_prompt_results
def load_i3d_pretrained(device=torch.device('cpu')):
    """Build an InceptionI3d (400 classes, RGB input) with pretrained weights.

    Downloads the checkpoint if necessary, loads it onto *device*, and
    returns the model in eval mode.
    """
    from fvd.pytorch_i3d import InceptionI3d
    model = InceptionI3d(400, in_channels=3).to(device)
    weights_path = download(_I3D_PRETRAINED_ID, 'i3d_pretrained_400.pt')
    state = torch.load(weights_path, map_location=device)
    model.load_state_dict(state)
    model.eval()
    return model
class Conv2d(_ConvNd):
__doc__ = (('Applies a 2D convolution over an input signal composed of several input\n planes.\n\n In the simplest case, the output value of the layer with input size\n :math:`(N, C_{\\text{in}}, H, W)` and output :math:`(N, C_{\\text{out}}, H_{\\text{out}}, W_{\\text{out}})`\n can be precisely described as:\n\n .. math::\n \\text{out}(N_i, C_{\\text{out}_j}) = \\text{bias}(C_{\\text{out}_j}) +\n \\sum_{k = 0}^{C_{\\text{in}} - 1} \\text{weight}(C_{\\text{out}_j}, k) \\star \\text{input}(N_i, k)\n\n\n where :math:`\\star` is the valid 2D `cross-correlation`_ operator,\n :math:`N` is a batch size, :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n ' + "\n\n This module supports :ref:`TensorFloat32<tf32_on_ampere>`.\n\n * :attr:`stride` controls the stride for the cross-correlation, a single\n number or a tuple.\n\n * :attr:`padding` controls the amount of padding applied to the input. It\n can be either a string {{'valid', 'same'}} or a tuple of ints giving the\n amount of implicit padding applied on both sides.\n\n * :attr:`dilation` controls the spacing between the kernel points; also\n known as the a trous algorithm. It is harder to describe, but this `link`_\n has a nice visualization of what :attr:`dilation` does.\n\n {groups_note}\n\n The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n\n - a single ``int`` -- in which case the same value is used for the height and width dimension\n - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,\n and the second `int` for the width dimension\n\n Note:\n {depthwise_separable_note}\n\n Note:\n {cudnn_reproducibility_note}\n\n Note:\n ``padding='valid'`` is the same as no padding. ``padding='same'`` pads\n the input so the output has the shape as the input. 
However, this mode\n doesn't support any stride values other than 1.\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int, tuple or str, optional): Padding added to all four sides of\n the input. Default: 0\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n ".format(**reproducibility_notes, **convolution_notes)) + '\n\n Shape:\n - Input: :math:`(N, C_{in}, H_{in}, W_{in})`\n - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where\n\n .. math::\n H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]\n \\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor\n\n .. math::\n W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]\n \\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape\n :math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`\n :math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]})`.\n The values of these weights are sampled from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`\n bias (Tensor): the learnable bias of the module of shape\n (out_channels). 
If :attr:`bias` is ``True``,\n then the values of these weights are\n sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`\n\n Examples:\n\n >>> # With square kernels and equal stride\n >>> m = nn.Conv2d(16, 33, 3, stride=2)\n >>> # non-square kernels and unequal stride and with padding\n >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))\n >>> # non-square kernels and unequal stride and with padding and dilation\n >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))\n >>> input = torch.randn(20, 16, 50, 100)\n >>> output = m(input)\n\n .. _cross-correlation:\n .. _link:\n ')
def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size_ = _pair(kernel_size)
stride_ = _pair(stride)
padding_ = (padding if isinstance(padding, str) else _pair(padding))
dilation_ = _pair(dilation)
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size_, stride_, padding_, dilation_, False, _pair(0), groups, bias, padding_mode, **factory_kwargs)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
if (self.padding_mode != 'zeros'):
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode), weight, bias, self.stride, _pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
    def forward(self, input: Tensor) -> Tensor:
        # Delegate to _conv_forward so padding-mode handling lives in one place.
        return self._conv_forward(input, self.weight, self.bias)
def test_drop_overlapping_pitch_bends() -> None:
    """drop_overlapping_pitch_bends should replace the pitch-bend list with
    None for any note event that overlaps another event in time, leaving
    non-overlapping events (and events already without bends) untouched.
    """
    # Events appear to be (start, end, pitch, velocity, pitch_bends) tuples —
    # inferred from the values; confirm against the function's definition.
    note_events_with_pitch_bends = [(0.0, 0.1, 60, 1.0, None), (2.0, 2.1, 62, 1.0, [0, 1, 2]), (2.0, 2.1, 64, 1.0, [0, 1, 2]), (1.0, 1.1, 65, 1.0, [0, 1, 2]), (1.1, 1.2, 67, 1.0, [0, 1, 2]), (3.0, 3.2, 69, 1.0, [0, 1, 2]), (3.1, 3.3, 71, 1.0, [0, 1, 2]), (5.0, 5.1, 72, 1.0, [0, 1, 2]), (5.0, 5.2, 74, 1.0, [0, 1, 2]), (4.0, 4.2, 76, 1.0, [0, 1, 2]), (4.1, 4.2, 77, 1.0, [0, 1, 2])]
    # Per `expected`, the events at 1.0-1.1 and 1.1-1.2 (which only touch at a
    # boundary) keep their bends; all genuinely overlapping pairs lose theirs.
    expected = [(0.0, 0.1, 60, 1.0, None), (2.0, 2.1, 62, 1.0, None), (2.0, 2.1, 64, 1.0, None), (1.0, 1.1, 65, 1.0, [0, 1, 2]), (1.1, 1.2, 67, 1.0, [0, 1, 2]), (3.0, 3.2, 69, 1.0, None), (3.1, 3.3, 71, 1.0, None), (5.0, 5.1, 72, 1.0, None), (5.0, 5.2, 74, 1.0, None), (4.0, 4.2, 76, 1.0, None), (4.1, 4.2, 77, 1.0, None)]
    result = drop_overlapping_pitch_bends(note_events_with_pitch_bends)
    # Output order is not asserted; compare as sorted sequences.
    assert (sorted(result) == sorted(expected))
class TestBloomWindowService():
    """Tests for the BLOOM ('together/bloom') tokenizer window service:
    encode/decode round-trips, token counting, context-window fitting, and
    right-truncation against the shared TEST_PROMPT fixture.
    """
    # Expected token ids for TEST_PROMPT under the BLOOM tokenizer.
    TEST_TOKEN_IDS: List[int] = [2175, 27149, 613, 30469, 664, 16289, 168358, 375, 12990, 76143, 12, 632, 660, 168734, 1912, 51298, 34181, 1800, 461, 368, 112640, 31036, 613, 22256, 7833, 21830, 376, 200008, 116891, 375, 43, 19540, 12, 861, 83174, 427, 5219, 20079, 136458, 361, 368, 12589, 15, 11468, 15, 530, 94369, 461, 88734, 20038, 17]
    def setup_method(self):
        """Create a temp dir-backed tokenizer service and the BLOOM window service."""
        self.path: str = tempfile.mkdtemp()
        service: TokenizerService = get_tokenizer_service(self.path)
        self.window_service = WindowServiceFactory.get_window_service('together/bloom', service)
    def teardown_method(self, method):
        # Remove the temporary tokenizer cache directory.
        shutil.rmtree(self.path)
    def test_max_request_length(self):
        assert (self.window_service.max_request_length == 2049)
    def test_encode(self):
        assert (self.window_service.encode(TEST_PROMPT).token_values == TestBloomWindowService.TEST_TOKEN_IDS)
    def test_decode(self):
        # Encoding then decoding must reproduce the prompt exactly.
        assert (self.window_service.decode(self.window_service.encode(TEST_PROMPT).tokens) == TEST_PROMPT)
    def test_tokenize(self):
        assert (self.window_service.tokenize(TEST_PROMPT) == ['The', ' Center', ' for', ' Research', ' on', ' Foundation', ' Models', ' (', 'CR', 'FM', ')', ' is', ' an', ' interdisciplin', 'ary', ' initiative', ' born', ' out', ' of', ' the', ' Stanford', ' Institute', ' for', ' Human', '-C', 'enter', 'ed', ' Artificial', ' Intelligence', ' (', 'H', 'AI', ')', ' that', ' aims', ' to', ' make', ' fundamental', ' advances', ' in', ' the', ' study', ',', ' development', ',', ' and', ' deployment', ' of', ' foundation', ' models', '.'])
    def test_tokenize_and_count(self):
        # TEST_PROMPT tokenizes to exactly 51 tokens (matches TEST_TOKEN_IDS length).
        assert (self.window_service.get_num_tokens(TEST_PROMPT) == 51)
    def test_fits_within_context_window(self):
        # The prompt plus (max - 51) expected completion tokens fits exactly;
        # one more expected token must fail.
        assert self.window_service.fits_within_context_window(TEST_PROMPT, (self.window_service.max_request_length - 51))
        assert (not self.window_service.fits_within_context_window(TEST_PROMPT, ((self.window_service.max_request_length - 51) + 1)))
    def test_truncate_from_right(self):
        # 41 copies of the prompt exceed the window; truncation must land
        # exactly on the maximum request length.
        long_prompt: str = (TEST_PROMPT * 41)
        assert (not self.window_service.fits_within_context_window(long_prompt))
        truncated_long_prompt: str = self.window_service.truncate_from_right(long_prompt)
        assert (self.window_service.get_num_tokens(truncated_long_prompt) == self.window_service.max_request_length)
        assert self.window_service.fits_within_context_window(truncated_long_prompt)
def filter_collate(batch):
    """Collate a batch into tensors like torch's default_collate, but first
    drop any None samples from a list batch (so datasets may return None to
    skip bad items).

    Recurses through mappings, namedtuples, and sequences; raises TypeError
    (via error_msg_fmt) for unsupported element types.
    """
    if isinstance(batch, list):
        # Filter out skipped samples before dispatching on element type.
        batch = [i for i in batch if (i is not None)]
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # In a DataLoader worker: stack directly into shared memory to
            # avoid an extra copy when sending to the main process.
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
        elem = batch[0]
        if (elem_type.__name__ == 'ndarray'):
            # Reject object/str dtypes (pattern defined at module level).
            if (np_str_obj_array_pattern.search(elem.dtype.str) is not None):
                raise TypeError(error_msg_fmt.format(elem.dtype))
            return filter_collate([torch.from_numpy(b) for b in batch])
        if (elem.shape == ()):
            # numpy scalars: convert via numpy_type_map to a torch tensor type.
            py_type = (float if elem.dtype.name.startswith('float') else int)
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(batch[0], int_classes):
        return torch.tensor(batch)
    elif isinstance(batch[0], string_classes):
        # Strings are returned as-is (a list of strings).
        return batch
    elif isinstance(batch[0], container_abcs.Mapping):
        # Collate each key across the batch.
        return {key: filter_collate([d[key] for d in batch]) for key in batch[0]}
    elif (isinstance(batch[0], tuple) and hasattr(batch[0], '_fields')):
        # namedtuple: rebuild the same type with collated fields.
        return type(batch[0])(*(filter_collate(samples) for samples in zip(*batch)))
    elif isinstance(batch[0], container_abcs.Sequence):
        # Transpose [sample][field] -> [field][sample] and collate each field.
        transposed = zip(*batch)
        return [filter_collate(samples) for samples in transposed]
    raise TypeError(error_msg_fmt.format(type(batch[0])))
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Ns3Ipv6Address_Ns3Ipv6Address_Unsigned_char_Ns3Ptr__lt__ns3Ipv6Route__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Auto-generated (PyBindGen-style) registration of the ns-3
    CallbackImpl<void, Ptr<Packet>, Ipv6Address, Ipv6Address, unsigned char,
    Ptr<Ipv6Route>, empty, empty, empty, empty> binding.

    Do not edit by hand; regenerate from the binding scanner instead.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet >, ns3::Ipv6Address, ns3::Ipv6Address, unsigned char, ns3::Ptr< ns3::Ipv6Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is exposed to Python as __call__ on the wrapper.
    cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::Packet >', 'arg0'), param('ns3::Ipv6Address', 'arg1'), param('ns3::Ipv6Address', 'arg2'), param('unsigned char', 'arg3'), param('ns3::Ptr< ns3::Ipv6Route >', 'arg4')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class CosineAnnealingWarmRestarts(_LRScheduler):
    """Cosine-annealing LR schedule with warm restarts (SGDR).

    The learning rate follows a cosine from base_lr down to eta_min over T_i
    epochs, then restarts; after each restart the period is multiplied by
    T_mult. T_cur tracks the number of epochs since the last restart.
    """
    def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=(- 1), verbose=False):
        """
        Args:
            optimizer: wrapped optimizer.
            T_0: number of epochs until the first restart (positive int).
            T_mult: factor by which the period grows after each restart (int >= 1).
            eta_min: minimum learning rate.
            last_epoch: index of the last epoch (-1 to start fresh).
            verbose: print the LR on each update.
        """
        if ((T_0 <= 0) or (not isinstance(T_0, int))):
            raise ValueError('Expected positive integer T_0, but got {}'.format(T_0))
        if ((T_mult < 1) or (not isinstance(T_mult, int))):
            raise ValueError('Expected integer T_mult >= 1, but got {}'.format(T_mult))
        self.T_0 = T_0
        # T_i is the length of the current cycle; starts at T_0.
        self.T_i = T_0
        self.T_mult = T_mult
        self.eta_min = eta_min
        super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch, verbose)
        self.T_cur = self.last_epoch
    def get_lr(self):
        """Cosine interpolation between base_lr and eta_min at T_cur/T_i."""
        if (not self._get_lr_called_within_step):
            warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
        return [(self.eta_min + (((base_lr - self.eta_min) * (1 + math.cos(((math.pi * self.T_cur) / self.T_i)))) / 2)) for base_lr in self.base_lrs]
    def step(self, epoch=None):
        """Advance the schedule.

        With epoch=None this steps by one and handles restarts incrementally;
        with an explicit (possibly fractional) epoch, T_cur/T_i are recomputed
        from scratch so the scheduler can be resumed or stepped mid-epoch.
        """
        if ((epoch is None) and (self.last_epoch < 0)):
            epoch = 0
        if (epoch is None):
            epoch = (self.last_epoch + 1)
            self.T_cur = (self.T_cur + 1)
            if (self.T_cur >= self.T_i):
                # Restart: reset the cycle counter and grow the period.
                self.T_cur = (self.T_cur - self.T_i)
                self.T_i = (self.T_i * self.T_mult)
        else:
            if (epoch < 0):
                raise ValueError('Expected non-negative epoch, but got {}'.format(epoch))
            if (epoch >= self.T_0):
                if (self.T_mult == 1):
                    # Constant period: position within cycle is epoch mod T_0.
                    self.T_cur = (epoch % self.T_0)
                else:
                    # Solve the geometric series for the current cycle index n.
                    n = int(math.log((((epoch / self.T_0) * (self.T_mult - 1)) + 1), self.T_mult))
                    self.T_cur = (epoch - ((self.T_0 * ((self.T_mult ** n) - 1)) / (self.T_mult - 1)))
                    self.T_i = (self.T_0 * (self.T_mult ** n))
            else:
                self.T_i = self.T_0
                self.T_cur = epoch
        self.last_epoch = math.floor(epoch)
        class _enable_get_lr_call():
            # Context manager that flags get_lr() as being called from step(),
            # suppressing the misuse warning in get_lr.
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False
                return self
        with _enable_get_lr_call(self):
            for (i, data) in enumerate(zip(self.optimizer.param_groups, self.get_lr())):
                (param_group, lr) = data
                param_group['lr'] = lr
                self.print_lr(self.verbose, i, lr, epoch)
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
@_function_dispatch(_pv_dispatcher)
def pv(rate, nper, pmt, fv=0, when='end'):
    """Compute the present value of an annuity plus a future lump sum.

    Given a per-period interest ``rate`` over ``nper`` periods, a constant
    payment ``pmt`` per period, and a future value ``fv``, return the present
    value. ``when`` selects whether payments fall at the start or end of each
    period (via _convert_when).

    NOTE(review): the original first line was a bare
    ``_function_dispatch(_pv_dispatcher)`` call whose result was discarded;
    restoring the ``@`` so the array-function dispatcher actually wraps pv —
    confirm against the upstream (numpy-financial-style) source.
    """
    when = _convert_when(when)
    (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when])
    temp = ((1 + rate) ** nper)
    # The annuity factor reduces to nper in the rate -> 0 limit; np.where
    # selects that limit explicitly to avoid dividing by zero.
    fact = np.where((rate == 0), nper, (((1 + (rate * when)) * (temp - 1)) / rate))
    return ((- (fv + (pmt * fact))) / temp)
def standard_confusion_matrix(y_test, y_test_pred):
    """Return the confusion matrix rearranged into [[TP, FP], [FN, TN]] layout.

    sklearn's confusion_matrix returns [[TN, FP], [FN, TP]] for binary labels;
    this reorders it into the "standard" profit-curve layout.
    y_test is assumed to be a torch tensor (it is moved to CPU and converted
    to numpy first) — confirm callers always pass tensors.
    """
    [[tn, fp], [fn, tp]] = confusion_matrix(y_test.cpu().numpy(), y_test_pred)
    return np.array([[tp, fp], [fn, tn]])
def _handle_boundaries(schema: dict[(str, Any)]) -> dict[(str, Any)]:
for (boundary_name, boundary_exclusive_name) in (('maximum', 'exclusiveMaximum'), ('minimum', 'exclusiveMinimum')):
value = schema.get(boundary_exclusive_name)
if (isinstance(value, (int, float)) and (not isinstance(value, bool))):
schema[boundary_exclusive_name] = True
schema[boundary_name] = value
return schema |
def weighted_signal_distortion_ratio_loss(output, bd):
    """Weighted SDR (wSDR) loss between estimated and target signals.

    Args:
        output: network estimate of the target signal (y_hat).
        bd: batch dict with 'x' (input mixture), 'y' (clean target) and 'z'
            (noise component) — presumably z == x - y; confirm upstream.

    Returns the batch mean of -alpha*SDR(y, y_hat) - (1-alpha)*SDR(z, z_hat),
    where SDR here is a normalized inner product (cosine similarity) and
    alpha weights the two terms by relative signal energy.
    """
    y = bd['y']
    z = bd['z']
    y_hat = output
    # Implied noise estimate: whatever of the mixture is not in y_hat.
    z_hat = (bd['x'] - y_hat)
    y_norm = torch.norm(y, dim=(- 1)).squeeze(1)
    z_norm = torch.norm(z, dim=(- 1)).squeeze(1)
    y_hat_norm = torch.norm(y_hat, dim=(- 1)).squeeze(1)
    z_hat_norm = torch.norm(z_hat, dim=(- 1)).squeeze(1)
    def loss_sdr(a, a_hat, a_norm, a_hat_norm):
        # Cosine similarity; the epsilon guards against zero norms.
        # `dotproduct` is defined elsewhere in this module — assumed to be a
        # batch inner product over the last dim; confirm.
        return (dotproduct(a, a_hat) / ((a_norm * a_hat_norm) + 1e-08))
    # Energy-based weighting between the target and noise terms.
    alpha = (y_norm.pow(2) / ((y_norm.pow(2) + z_norm.pow(2)) + 1e-08))
    loss_wSDR = (((- alpha) * loss_sdr(y, y_hat, y_norm, y_hat_norm)) - ((1 - alpha) * loss_sdr(z, z_hat, z_norm, z_hat_norm)))
    return loss_wSDR.mean()
class StarReLU(nn.Module):
    """StarReLU activation: ``scale * relu(x)**2 + bias``.

    Scale and bias are single-element learnable parameters (learnability
    controlled by scale_learnable / bias_learnable). ``mode`` is accepted
    for API compatibility but unused here.
    """

    def __init__(self, scale_value=1.0, bias_value=0.0, scale_learnable=True, bias_learnable=True, mode=None, inplace=False):
        super().__init__()
        self.inplace = inplace  # kept for API compatibility; ReLU honors it
        self.relu = nn.ReLU(inplace=inplace)
        self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable)
        self.bias = nn.Parameter(bias_value * torch.ones(1), requires_grad=bias_learnable)

    def forward(self, x):
        squared = self.relu(x) ** 2
        return self.scale * squared + self.bias
def get_dataset(imagenet_stats=False, resize=224, scale=None, offset=None):
    """Load the ImageNet-R test split as a resized, normalized tf.data pipeline.

    Args:
        imagenet_stats: if truthy, normalize with ImageNet statistics.
        resize: target height/width in pixels.
        scale, offset: custom normalization parameters, used only when both
            are provided and imagenet_stats is falsy.
    """
    if imagenet_stats:
        norm_layer = get_normalization_layer(imagenet_stats)
    elif scale is not None and offset is not None:
        # Fix: the original tested `scale and offset` by truthiness, which
        # silently ignored legitimate zero values; test against None instead.
        norm_layer = get_normalization_layer(imagenet_stats, scale, offset)
    else:
        norm_layer = get_normalization_layer()
    imagenet_r = tfds.load('imagenet_r', split='test', as_supervised=True)
    # NOTE(review): num_parallel_calls=True looks suspicious — tf.data expects
    # an int or tf.data.AUTOTUNE here; confirm the intended value.
    imagenet_r = imagenet_r.map((lambda x, y: (tf.image.resize(x, (resize, resize)), y))).batch(BATCH_SIZE).map(preprocess_image(norm_layer), num_parallel_calls=True).prefetch(AUTO)
    return imagenet_r
def print_perform(ref, pred):
    """Print generation metrics (BLEU, F1, Distinct-n, per-n BLEU, Entropy-n)
    for predictions against references, all on a single output line.
    """
    # end=' ' keeps all metric groups on one line.
    print('BLEU: {:.3f}, F1: {:.2f}, Distinct-1: {:.2f}, Distinct-2: {:.2f}'.format(eval_bleu(ref, pred), (eval_f1(ref, pred) * 100), eval_distinct(pred, 1), eval_distinct(pred, 2)), end=' ')
    print('BLEU 1, 2, 3, 4: {}'.format(eval_bleu_detail(ref, pred)), end=' ')
    print('Entropy-1: {:.2f}'.format(eval_entropy(pred, 1)), end=' ')
    print('Entropy-2: {:.2f}'.format(eval_entropy(pred, 2)))
class AttentionBlock(nn.Module):
    """Spatial self-attention over a (batch, channel, height, width) feature map.

    Pixels are flattened to a sequence of length H*W, attended over with
    multi-head scaled dot-product attention, and reshaped back; the input is
    added as a residual and the sum divided by rescale_output_factor.
    """
    def __init__(self, channels: int, num_head_channels: Optional[int]=None, num_groups: int=32, rescale_output_factor: float=1.0, eps: float=1e-05):
        """
        Args:
            channels: number of input/output channels.
            num_head_channels: channels per head; None means a single head.
            num_groups: groups for the GroupNorm applied before attention.
            rescale_output_factor: divisor applied to the residual sum.
            eps: GroupNorm epsilon.
        """
        super().__init__()
        self.channels = channels
        self.num_heads = ((channels // num_head_channels) if (num_head_channels is not None) else 1)
        self.num_head_size = num_head_channels
        self.group_norm = nn.GroupNorm(num_channels=channels, num_groups=num_groups, eps=eps, affine=True)
        self.query = nn.Linear(channels, channels)
        self.key = nn.Linear(channels, channels)
        self.value = nn.Linear(channels, channels)
        self.rescale_output_factor = rescale_output_factor
        # NOTE(review): the third positional arg of nn.Linear is `bias`;
        # passing 1 acts as bias=True — looks accidental, confirm upstream.
        self.proj_attn = nn.Linear(channels, channels, 1)
    def transpose_for_scores(self, projection: torch.Tensor) -> torch.Tensor:
        """Reshape (B, S, C) to (B, heads, S, C/heads) for per-head attention."""
        new_projection_shape = (projection.size()[:(- 1)] + (self.num_heads, (- 1)))
        new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3)
        return new_projection
    def forward(self, hidden_states):
        residual = hidden_states
        (batch, channel, height, width) = hidden_states.shape
        hidden_states = self.group_norm(hidden_states)
        # Flatten spatial dims: (B, C, H*W) -> (B, H*W, C) sequence layout.
        hidden_states = hidden_states.view(batch, channel, (height * width)).transpose(1, 2)
        query_proj = self.query(hidden_states)
        key_proj = self.key(hidden_states)
        value_proj = self.value(hidden_states)
        query_states = self.transpose_for_scores(query_proj)
        key_states = self.transpose_for_scores(key_proj)
        value_states = self.transpose_for_scores(value_proj)
        # Split 1/sqrt(d) across both operands for better numerical range.
        scale = (1 / math.sqrt(math.sqrt((self.channels / self.num_heads))))
        attention_scores = torch.matmul((query_states * scale), (key_states.transpose((- 1), (- 2)) * scale))
        # Softmax in float32, then cast back to the scores' dtype.
        attention_probs = torch.softmax(attention_scores.float(), dim=(- 1)).type(attention_scores.dtype)
        hidden_states = torch.matmul(attention_probs, value_states)
        # Merge heads back into the channel dimension.
        hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous()
        new_hidden_states_shape = (hidden_states.size()[:(- 2)] + (self.channels,))
        hidden_states = hidden_states.view(new_hidden_states_shape)
        hidden_states = self.proj_attn(hidden_states)
        # Back to (B, C, H, W), then residual add and rescale.
        hidden_states = hidden_states.transpose((- 1), (- 2)).reshape(batch, channel, height, width)
        hidden_states = ((hidden_states + residual) / self.rescale_output_factor)
        return hidden_states
def dropnoise(fd):
    """Randomly delete roughly 1/8 of the tokens from each line of *fd*.

    For every line with at least 8 tokens, len(tokens)//8 token positions are
    chosen uniformly without replacement (np.random) and dropped; shorter
    lines are kept verbatim (stripped). Returns the list of resulting lines.
    """
    noised_lines = []
    for line in fd:
        tokens = line.strip().split()
        winsize = 1  # window width; with winsize == 1 each kept "window" is a single token
        n_drop = (len(tokens) // 8) if (winsize == 1) else (len(tokens) // 11)
        if n_drop == 0:
            # Too short to drop anything: keep the stripped line as-is.
            noised_lines.append(line.strip())
            continue
        drop_at = set(np.random.choice(len(tokens), size=(n_drop,), replace=False))
        kept = [
            ' '.join(tokens[pos:(pos + winsize)])
            for pos in range(len(tokens))
            if pos not in drop_at
        ]
        noised_lines.append(' '.join(kept))
    return noised_lines
def save_model(model, directory, metadata=None, filename=MODEL_FILENAME):
    """Save *model*'s weights (and metadata) under *directory*, device-safely.

    The model is temporarily moved to CPU so the checkpoint is
    device-agnostic. If metadata is None, it is reconstructed from the
    model's img_size / latent_dim / model_type attributes.

    Fix: the move back to the original device now happens in a ``finally``
    block, so the caller's model is restored even if saving raises.
    """
    device = next(model.parameters()).device
    model.cpu()
    try:
        if metadata is None:
            metadata = dict(img_size=model.img_size, latent_dim=model.latent_dim, model_type=model.model_type)
        save_metadata(metadata, directory)
        path_to_model = os.path.join(directory, filename)
        torch.save(model.state_dict(), path_to_model)
    finally:
        # Restore the original device even on failure.
        model.to(device)
def save_chunks_speaker(spkr):
    """Split one speaker's corpus audio into chunks and save them.

    *spkr* is appended directly to the module-level __CORPUSPATH__ /
    __OUTPATH__ strings, so it is presumably a relative path or speaker id —
    confirm the expected format against the callers.
    """
    print(spkr)
    # Combine the speaker's source audio, split it, and write chunk files.
    audio = combine((__CORPUSPATH__ + spkr))
    chunks = split(audio)
    save_chunks(chunks, (__OUTPATH__ + spkr))
class ScionRouter(Router):
    """Router extended with SCION inter-AS interfaces and port allocation."""

    # ifid -> interface description
    __interfaces: Dict[(int, Dict)]
    # next port number to hand out
    __next_port: int

    def __init__(self):
        super().__init__()
        self.initScionRouter()

    def initScionRouter(self):
        """Reset the interface table and the port counter."""
        self.__interfaces = {}
        self.__next_port = 50000

    def addScionInterface(self, ifid: int, iface: Dict) -> None:
        """Register a new SCION interface under *ifid*; the id must be unused."""
        assert (ifid not in self.__interfaces), f'interface {ifid} already exists'
        self.__interfaces[ifid] = iface

    def getScionInterface(self, ifid: int) -> Dict:
        """Return the interface previously registered under *ifid*."""
        assert (ifid in self.__interfaces), f'interface {ifid} does not exist'
        return self.__interfaces[ifid]

    def getScionInterfaces(self) -> Dict[(int, Dict)]:
        """Return the full ifid -> interface mapping."""
        return self.__interfaces

    def getNextPort(self) -> int:
        """Allocate and return the next unused port number."""
        allocated = self.__next_port
        self.__next_port = allocated + 1
        return allocated
class RandomShortPoleCartPole(ModifiableCartPoleEnv):
    """CartPole variant whose pole length is randomly resampled.

    Sampling uses uniform_exclude_inner over the EXTREME_*_LENGTH bounds while
    excluding the RANDOM_*_LENGTH inner interval — presumably producing
    'extreme' (short/long) lengths outside the normal training range; confirm
    against uniform_exclude_inner's definition.
    """
    def __init__(self):
        super(RandomShortPoleCartPole, self).__init__()
        # Sample an initial pole length and propagate derived quantities.
        self.length = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_LENGTH, self.EXTREME_UPPER_LENGTH, self.RANDOM_LOWER_LENGTH, self.RANDOM_UPPER_LENGTH)
        self._followup()
    def reset(self, new=True):
        """Reset the cart state; if *new*, also resample the pole length."""
        self.state = self.np_random.uniform(low=(- 0.05), high=0.05, size=(4,))
        self.steps_beyond_done = None
        if new:
            self.length = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_LENGTH, self.EXTREME_UPPER_LENGTH, self.RANDOM_LOWER_LENGTH, self.RANDOM_UPPER_LENGTH)
            self._followup()
        return np.array(self.state)
    def parameters(self):
        """Return the parent's parameters extended with the current length."""
        # NOTE(review): accesses the parent's `parameters` without calling it —
        # assumed to be a property on the base class; confirm.
        parameters = super(RandomShortPoleCartPole, self).parameters
        parameters.update({'length': self.length})
        return parameters
class StringMatchToken(ElementSetToken):
    """Selects DOM elements whose normalized text matches a string sub-token.

    Both the sub-token's result and each element's text are stripped of
    punctuation and whitespace before comparison; the actual matching rule is
    deferred to subclasses via _string_match.
    """
    def __init__(self, token, classes=None):
        super(StringMatchToken, self).__init__(classes)
        # NOTE(review): `unicode` is Python 2-only; under Python 3 this line
        # raises NameError — confirm which interpreter this module targets.
        assert (token.return_type == unicode)
        self._token = token
    def _execute(self, env):
        """Return the ElementSet of elements whose normalized text matches."""
        s = self._token.execute(env)
        processed_s = strip_whitespace(strip_punctuation(s))
        matched_elements = set()
        for dom in env.elements:
            # Only consider elements with text that also pass the class filter.
            if ((dom.text is not None) and self._class_match(dom)):
                processed_text = strip_whitespace(strip_punctuation(dom.text))
                if self._string_match(processed_s, processed_text):
                    matched_elements.add(dom)
        return ElementSet(matched_elements)
    def _string_match(self, token_result, dom_text):
        """Subclass hook: decide whether dom_text matches token_result."""
        raise NotImplementedError()
def main(args):
    """Memorization-extraction experiment: sample sequences from GPT2-XL with a
    decaying temperature, score each sample under several membership-inference
    metrics (XL/Small perplexity, zlib entropy, lower-cased and windowed
    perplexity), deduplicate, and report/write the top samples per metric.
    """
    print('Loading models...')
    TOKENIZER_GPT2 = load_tokenizer_for_causal_lm('gpt2')
    MODEL_GPT2 = load_model_for_causal_lm('gpt2', device)
    MODEL_GPT2_XL = load_model_for_causal_lm('gpt2-xl', device)
    print('GPT2 and GPT2-XL models loaded!')
    seq_len = 256
    # High initial temperature that decays during generation (diversity early on).
    logits_warper = LogitsProcessorList([DecayingTemperatureWarper(10.0)])
    num_batches = int(math.ceil((args.N / args.batch_size)))
    new_tot = (num_batches * args.batch_size)
    generated_samples = []
    scores = defaultdict(list)
    with tqdm(total=new_tot) as pbar:
        for batch in range(num_batches):
            # Unconditional generation: every prompt is just the EOS token.
            prompts = ([TOKENIZER_GPT2.eos_token] * args.batch_size)
            inputs = TOKENIZER_GPT2(prompts, return_tensors='pt', padding=True).to(device)
            generated_sequences = MODEL_GPT2_XL.generate(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, max_length=seq_len, do_sample=True, logits_processor=logits_warper, renormalize_logits=True)
            generated_texts = TOKENIZER_GPT2.batch_decode(generated_sequences, skip_special_tokens=True)
            for text in generated_texts:
                perplexity_gpt2_xl = calculate_perplexity(text, MODEL_GPT2_XL, TOKENIZER_GPT2, device)
                perplexity_gpt2 = calculate_perplexity(text, MODEL_GPT2, TOKENIZER_GPT2, device)
                perplexity_gpt2_xl_lower = calculate_perplexity(text.lower(), MODEL_GPT2_XL, TOKENIZER_GPT2, device)
                # Compressed size as a crude redundancy/entropy proxy.
                zlib_entropy = len(zlib.compress(bytes(text, 'utf-8')))
                # NOTE(review): this is the same call as the LOWER metric — no
                # sliding window is actually applied despite the name and the
                # "window of size 50" report below; looks like a bug, confirm.
                perplexity_gpt2_xl_window = calculate_perplexity(text.lower(), MODEL_GPT2_XL, TOKENIZER_GPT2, device)
                generated_samples.append(text)
                scores['XL'].append(perplexity_gpt2_xl.cpu())
                scores['SMALL'].append(perplexity_gpt2.cpu())
                scores['ZLIB'].append(zlib_entropy)
                scores['LOWER'].append(perplexity_gpt2_xl_lower.cpu())
                scores['WINDOW'].append(perplexity_gpt2_xl_window.cpu())
            pbar.update(args.batch_size)
    print(len(scores['XL']))
    scores['XL'] = np.asarray(scores['XL'])
    scores['SMALL'] = np.asarray(scores['SMALL'])
    scores['ZLIB'] = np.asarray(scores['ZLIB'])
    scores['LOWER'] = np.asarray(scores['LOWER'])
    scores['WINDOW'] = np.asarray(scores['WINDOW'])
    # Drop exact-duplicate samples (keep first occurrence) across all scores.
    idxs = pd.Index(generated_samples)
    idxs_mask = (~ idxs.duplicated())
    print(idxs_mask)
    generated_samples_clean = np.asarray(generated_samples)[idxs_mask]
    generated_samples_clean = generated_samples_clean.tolist()
    scores['XL'] = scores['XL'][idxs_mask]
    scores['SMALL'] = scores['SMALL'][idxs_mask]
    scores['ZLIB'] = scores['ZLIB'][idxs_mask]
    scores['LOWER'] = scores['LOWER'][idxs_mask]
    scores['WINDOW'] = scores['WINDOW'][idxs_mask]
    assert (len(generated_samples_clean) == len(scores['XL']))
    assert (len(scores['SMALL']) == len(scores['XL']))
    print('Num duplicates:', (len(generated_samples) - len(generated_samples_clean)))
    # Metric 1: absolute XL log-perplexity.
    metric = np.log(scores['XL'])
    print(f' top samples by XL perplexity: ')
    print_best(metric, generated_samples_clean, 'Sort by perplexity of GPT2-XL', 'PPL-XL', scores['XL'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by perplexity of GPT2-XL', 'PPL-XL', scores['XL'], lower_better=True)
    print()
    print()
    # Metric 2: ratio of XL to Small log-perplexity.
    metric = (np.log(scores['XL']) / np.log(scores['SMALL']))
    print(f' top samples by ratio of XL and SMALL perplexity: ')
    print_best(metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL and GPT2-Small', 'PPL-XL', scores['XL'], 'PPL-SMALL', scores['SMALL'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL and GPT2-Small', 'PPL-XL', scores['XL'], 'PPL-SMALL', scores['SMALL'], lower_better=True)
    print()
    print()
    # Metric 3: XL log-perplexity vs zlib-compressed size.
    metric = (np.log(scores['XL']) / np.log(scores['ZLIB']))
    print(f' top samples by ratio of XL perplexity and ZLIB entropy: ')
    print_best(metric, generated_samples_clean, 'Sort by ratio of XL perplexity and ZLIB entropy', 'PPL-XL', scores['XL'], 'Entropy-Zlib', scores['ZLIB'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by ratio of XL perplexity and ZLIB entropy', 'PPL-XL', scores['XL'], 'Entropy-Zlib', scores['ZLIB'], lower_better=True)
    print()
    print()
    # Metric 4: case sensitivity — normal vs lower-cased perplexity.
    metric = (np.log(scores['XL']) / np.log(scores['LOWER']))
    print(f' top samples by ratio of perplexity of GPT2-XL on normal and lower-cased sample: ')
    print_best(metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL on normal and lower-cased sample', 'PPL-XL', scores['XL'], 'PPL-XL-Lower', scores['LOWER'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by ratio of perplexity of GPT2-XL on normal and lower-cased sample', 'PPL-XL', scores['XL'], 'PPL-XL-Lower', scores['LOWER'], lower_better=True)
    print()
    print()
    # Metric 5: "window" perplexity (see NOTE above about its computation).
    metric = np.log(scores['WINDOW'])
    print(f' top samples by minimum XL perplexity across a sliding window of size 50: ')
    print_best(metric, generated_samples_clean, 'Sort by minimum perplexity of GPT2-XL on window of size 50', 'PPL-WINDOW', scores['WINDOW'], lower_better=True)
    print_best_to_file(args.outfile, metric, generated_samples_clean, 'Sort by minimum perplexity of GPT2-XL on window of size 50', 'PPL-WINDOW', scores['WINDOW'], lower_better=True)
    print()
    print()
def CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms):
    """Register planar-complex GEMM kernels with the manifest.

    Enumerates the cross product of layouts, tile descriptions, alignments and
    complex transforms for both the plain and array planar-complex GEMM kinds,
    appending one GemmOperation per combination.
    """
    # Default: apply no transform to either operand.
    if complex_transforms is None:
        complex_transforms = [(ComplexTransform.none, ComplexTransform.none)]
    element_a, element_b, element_c, element_epilogue = data_type
    # Nothing to emit when no kernel filter was requested on the command line.
    if manifest.args.kernels == '':
        return
    for kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray):
        for layout in layouts:
            for tile in tile_descriptions:
                for align in alignment_constraints:
                    for xform in complex_transforms:
                        # Output alignment is capped at 8 elements.
                        align_c = min(8, align)
                        tensor_a = TensorDescription(element_a, layout[0], align, xform[0])
                        tensor_b = TensorDescription(element_b, layout[1], align, xform[1])
                        tensor_c = TensorDescription(element_c, layout[2], align_c)
                        manifest.append(GemmOperation(kind, tile.minimum_compute_capability, tile, tensor_a, tensor_b, tensor_c, element_epilogue))
class BaseImagePipeline(ABC):
    """Abstract base for image pre/post-processing pipelines.

    Subclasses must override the three hook methods below.

    Args:
        output_image_size: side length (pixels) of the final output images.
        extra_pixels: extra margin (pixels) requested on top of the output size.
    """

    def __init__(self, output_image_size: int, extra_pixels: int = 0):
        self.output_image_size = output_image_size
        self.extra_pixels = extra_pixels

    def get_image_input_size(self) -> int:
        """Return the size images must have when entering the pipeline."""
        # Bug fix: `raise NotImplemented` raised a TypeError (NotImplemented is
        # a comparison sentinel, not an exception). NotImplementedError is correct.
        raise NotImplementedError

    def image_input_manipulation(self, images: Any) -> Any:
        """Transform images before they are fed to the model."""
        raise NotImplementedError

    def image_output_finalize(self, images: Any) -> Any:
        """Post-process model outputs into final images."""
        raise NotImplementedError
class Semeval2016Dataset(datasets.GeneratorBasedBuilder):
    """SemEval-2016 Task 4 (Subtask A) sentiment dataset builder.

    Yields English tweets with a three-way sentiment label
    (positive / negative / neutral) from the 2017 GOLD release files.
    """

    VERSION = datasets.Version('1.1.0')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='semeval2016',
            version=VERSION,
            description='Trinary sentiment task on English Twitter data.',
        )
    ]

    def _info(self):
        # Two-column schema: raw tweet text plus the sentiment class.
        features = datasets.Features({
            'text': datasets.Value('string'),
            'label': datasets.ClassLabel(names=['positive', 'negative', 'neutral']),
        })
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features, supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION)

    def _split_generators(self, dl_manager):
        root = dl_manager.download_and_extract(_URL)
        ordered_splits = (datasets.Split.TRAIN, datasets.Split.TEST, datasets.Split.VALIDATION)
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={'data_dir': root, 'split': split})
            for split in ordered_splits
        ]

    def _generate_examples(self, data_dir, split):
        base = Path(data_dir).joinpath('2017_English_final', 'GOLD', 'Subtask_A')
        # Validation combines the dev and devtest files.
        per_split = {
            datasets.Split.TRAIN: ['twitter-2016train-A.txt'],
            datasets.Split.VALIDATION: ['twitter-2016dev-A.txt', 'twitter-2016devtest-A.txt'],
            datasets.Split.TEST: ['twitter-2016test-A.txt'],
        }
        for fname in per_split[split]:
            with base.joinpath(fname).open('rt') as fh:
                # Rows are tab-separated: tweet id, label, text.
                for row_idx, fields in enumerate(csv.reader(fh, delimiter='\t')):
                    yield f'{row_idx}_{fields[0]}', {'text': fields[2], 'label': fields[1]}
class Test__StripWhitespace(unittest.TestCase):
    """Tests for StripWhitespace: tokenized SQL run through Tokens2Unicode
    should collapse to a single line with minimal spacing."""
    # Fixtures of increasing complexity: two statements, a join, and a long
    # multi-line SELECT with aggregates. Expected values in the tests below are
    # the corresponding whitespace-stripped single-line forms.
    sql = 'INSERT INTO dir_entries(type)VALUES(:type);\n\n    INSERT INTO directories(inode)\n    VALUES(:inode)\n    LIMIT 1'
    sql2 = 'SELECT child_entry,asdf AS inode, creation\n    FROM links\n    WHERE parent_dir == :parent_dir AND name == :name\n    LIMIT 1'
    sql3 = 'SELECT\n    0 AS st_dev,\n    0 AS st_uid,\n    0 AS st_gid,\n\n    dir_entries.type AS st_mode,\n    dir_entries.inode AS st_ino,\n    COUNT(links.child_entry) AS st_nlink,\n\n    :creation AS st_ctime,\n    dir_entries.access AS st_atime,\n    dir_entries.modification AS st_mtime,\n\n    COALESCE(files.size,0) AS st_size,\n    COALESCE(files.size,0) AS size\n\nFROM dir_entries\n    LEFT JOIN files\n        ON dir_entries.inode == files.inode\n    LEFT JOIN links\n        ON dir_entries.inode == links.child_entry\n\nWHERE dir_entries.inode == :inode\n\nGROUP BY dir_entries.inode\nLIMIT 1'

    def test_StripWhitespace1(self):
        # Multi-statement input: newlines between statements must vanish.
        self.assertEqual(Tokens2Unicode(StripWhitespace(tokenize(self.sql))), 'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO directories(inode)VALUES(:inode)LIMIT 1')

    def test_StripWhitespace2(self):
        # Spaces around commas/operators are minimized.
        self.assertEqual(Tokens2Unicode(StripWhitespace(tokenize(self.sql2))), 'SELECT child_entry,asdf AS inode,creation FROM links WHERE parent_dir==:parent_dir AND name==:name LIMIT 1')

    def test_StripWhitespace3(self):
        # Large query with blank lines and joins collapses to one line.
        self.assertEqual(Tokens2Unicode(StripWhitespace(tokenize(self.sql3))), 'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,dir_entries.modification AS st_mtime,COALESCE(files.size,0)AS st_size,COALESCE(files.size,0)AS size FROM dir_entries LEFT JOIN files ON dir_entries.inode==files.inode LEFT JOIN links ON dir_entries.inode==links.child_entry WHERE dir_entries.inode==:inode GROUP BY dir_entries.inode LIMIT 1')
def get_data_loader(train_examples, label_list, max_seq_length, tokenizer, batch_size, sampler):
    """Featurize examples and wrap them in a batched DataLoader.

    Args:
        train_examples: raw examples to featurize.
        label_list: label vocabulary passed to the featurizer.
        max_seq_length: maximum token length per example.
        tokenizer: tokenizer used for featurization.
        batch_size: mini-batch size.
        sampler: sampler class (e.g. RandomSampler) instantiated on the dataset.

    Returns:
        torch DataLoader over (input_ids, input_mask, segment_ids, label_ids,
        valid_ids, label_mask) tensors.
    """
    features = convert_examples_to_features(train_examples, label_list, max_seq_length, tokenizer)

    def stack(attr):
        # Collect one feature attribute across all examples as a LongTensor.
        return torch.tensor([getattr(f, attr) for f in features], dtype=torch.long)

    dataset = TensorDataset(
        stack('input_ids'),
        stack('input_mask'),
        stack('segment_ids'),
        stack('label_id'),
        stack('valid_ids'),
        stack('label_mask'),
    )
    return DataLoader(dataset, sampler=sampler(dataset), batch_size=batch_size)
class ResnetGenerator(nn.Module):
    """Resnet-based generator: initial conv, two stride-2 downsampling convs,
    `n_blocks` residual blocks, then mirrored transposed-conv upsampling and a
    final 7x7 conv + Tanh (CycleGAN/pix2pix style)."""
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Args:
            input_nc: number of channels in input images.
            output_nc: number of channels in output images.
            ngf: number of filters in the first conv layer.
            norm_layer: normalization layer class (possibly a functools.partial).
            use_dropout: enable dropout inside the resnet blocks.
            n_blocks: number of residual blocks (>= 0).
            padding_type: padding used in the resnet blocks.
        """
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        # InstanceNorm carries no affine shift by default, so convs need bias.
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)
        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), norm_layer(ngf), nn.ReLU(True)]
        n_downsampling = 2
        # Each downsampling step halves the spatial size and doubles channels.
        for i in range(n_downsampling):
            mult = (2 ** i)
            model += [nn.Conv2d((ngf * mult), ((ngf * mult) * 2), kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(((ngf * mult) * 2)), nn.ReLU(True)]
        mult = (2 ** n_downsampling)
        for i in range(n_blocks):
            model += [ResnetBlock((ngf * mult), padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        # Mirror the downsampling with transposed convs back to full resolution.
        for i in range(n_downsampling):
            mult = (2 ** (n_downsampling - i))
            model += [nn.ConvTranspose2d((ngf * mult), int(((ngf * mult) / 2)), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), norm_layer(int(((ngf * mult) / 2))), nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)
    def forward(self, input, encoder_out=False):
        # NOTE(review): `netG` is a free (module-global) name, not an attribute;
        # this method raises NameError unless the enclosing module defines it,
        # and `layer_num` stays unbound when netG != 'resnet_9blocks' but
        # encoder_out is True — confirm intent with the module author.
        if (netG == 'resnet_9blocks'):
            layer_num = 18
        if encoder_out:
            # Returns (intermediate activation after the first `layer_num`
            # modules, final output). Runs the prefix twice — presumably a
            # deliberate simplicity/compute trade-off; verify.
            return (self.model[:layer_num](input), self.model(input))
        return self.model(input)
# ('pb_scaffold')  # NOTE(review): stripped decorator residue — likely
# @Model.register('pb_scaffold') on the class below; restore before use.
class PropBankScaffoldSpanSrl(Model):
    """Span-based semantic role labeling model with a syntactic-constituent
    scaffold task: shared span representations feed both an SRL semi-Markov CRF
    head and an auxiliary constituent classifier, mixed by `mixing_ratio`.
    NOTE(review): the bare `('pb_scaffold')` expression just above this class
    looks like a stripped registration decorator (e.g. @Model.register) —
    confirm against the original source."""
    def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, stacked_encoder: Seq2SeqEncoder, span_feedforward: FeedForward, binary_feature_dim: int, max_span_width: int, binary_feature_size: int, distance_feature_size: int, embedding_dropout: float=0.2, srl_label_namespace: str='labels', constit_label_namespace: str='constit_labels', mixing_ratio: float=1.0, cutoff_epoch: int=(- 1), fast_mode: bool=True, loss_type: str='logloss', initializer: InitializerApplicator=InitializerApplicator(), regularizer: Optional[RegularizerApplicator]=None) -> None:
        super(PropBankScaffoldSpanSrl, self).__init__(vocab, regularizer)
        self.text_field_embedder = text_field_embedder
        self.embedding_dropout = Dropout(p=embedding_dropout)
        # 2-way embedding for the verb-indicator (is/is-not the predicate) flag.
        self.binary_feature_embedding = Embedding(2, binary_feature_dim)
        self.stacked_encoder = stacked_encoder
        # Encoder must accept token embedding + verb-indicator embedding.
        if ((text_field_embedder.get_output_dim() + binary_feature_dim) != stacked_encoder.get_input_dim()):
            raise ConfigurationError('The SRL Model uses a binary verb indicator feature, meaning the input dimension of the stacked_encoder must be equal to the output dimension of the text_field_embedder + 1.')
        self.max_span_width = max_span_width
        # Span features: width, bucketed distance to predicate, and direction.
        self.span_width_embedding = Embedding(max_span_width, binary_feature_size)
        self.span_distance_bin = 25
        self.span_distance_embedding = Embedding(self.span_distance_bin, distance_feature_size)
        self.span_direction_embedding = Embedding(2, binary_feature_size)
        self.span_feedforward = TimeDistributed(span_feedforward)
        self.head_scorer = TimeDistributed(torch.nn.Linear(stacked_encoder.get_output_dim(), 1))
        self.num_srl_args = self.vocab.get_vocab_size(srl_label_namespace)
        # '*' marks non-spans, 'O' marks outside-of-any-argument spans.
        not_a_span_tag = self.vocab.get_token_index('*', srl_label_namespace)
        outside_span_tag = self.vocab.get_token_index('O', srl_label_namespace)
        self.semi_crf = SemiMarkovConditionalRandomField(num_tags=self.num_srl_args, max_span_width=max_span_width, loss_type=loss_type, default_tag=not_a_span_tag, outside_span_tag=outside_span_tag)
        self.num_constit_tags = self.vocab.get_vocab_size(constit_label_namespace)
        # Two projection heads over the shared span representation.
        self.srl_arg_projection_layer = TimeDistributed(Linear(span_feedforward.get_output_dim(), self.num_srl_args))
        self.constit_arg_projection_layer = TimeDistributed(Linear(span_feedforward.get_output_dim(), self.num_constit_tags))
        self.mixing_ratio = mixing_ratio
        # NOTE(review): the `cutoff_epoch` argument is stored as a *batch*
        # cutoff and compared against a batch counter below — confirm the
        # intended unit (epochs vs. batches).
        self.cutoff_batch = cutoff_epoch
        self.batch = 0
        self.metrics = {'constituents': NonBioSpanBasedF1Measure(vocab, tag_namespace=constit_label_namespace, ignore_classes=['*']), 'srl': NonBioSpanBasedF1Measure(vocab, tag_namespace=srl_label_namespace, ignore_classes=['V', '*'])}
        self.fast_mode = fast_mode
        initializer(self)
    def forward(self, tokens: Dict[(str, torch.LongTensor)], verb_indicator: torch.LongTensor, target_index: torch.LongTensor, span_starts: torch.LongTensor, span_ends: torch.LongTensor, span_mask: torch.LongTensor, constituents: torch.LongTensor=None, tags: torch.LongTensor=None) -> Dict[(str, torch.Tensor)]:
        """Compute SRL + constituent logits; returns a dict with logits, masks,
        (optionally) decoded tags/probabilities, and the mixed training loss."""
        self.batch += 1
        embedded_text_input = self.embedding_dropout(self.text_field_embedder(tokens))
        batch_size = embedded_text_input.size(0)
        text_mask = util.get_text_field_mask(tokens)
        # Concatenate the verb-indicator embedding onto each token embedding.
        embedded_verb_indicator = self.binary_feature_embedding(verb_indicator.long())
        embedded_text_with_verb_indicator = torch.cat([embedded_text_input, embedded_verb_indicator], (- 1))
        embedding_dim_with_binary_feature = embedded_text_with_verb_indicator.size()[2]
        if (self.stacked_encoder.get_input_dim() != embedding_dim_with_binary_feature):
            raise ConfigurationError("The SRL model uses an indicator feature, which makes the embedding dimension one larger than the value specified. Therefore, the 'input_dim' of the stacked_encoder must be equal to total_embedding_dim + 1.")
        encoded_text = self.stacked_encoder(embedded_text_with_verb_indicator, text_mask)
        # relu() clamps padding values of -1 to 0 before indexing.
        span_starts = F.relu(span_starts.float()).long().view(batch_size, (- 1))
        span_ends = F.relu(span_ends.float()).long().view(batch_size, (- 1))
        target_index = F.relu(target_index.float()).long().view(batch_size)
        span_embeddings = span_srl_util.compute_span_representations(self.max_span_width, encoded_text, target_index, span_starts, span_ends, self.span_width_embedding, self.span_direction_embedding, self.span_distance_embedding, self.span_distance_bin, self.head_scorer)
        span_scores = self.span_feedforward(span_embeddings)
        srl_logits = self.srl_arg_projection_layer(span_scores)
        constit_logits = self.constit_arg_projection_layer(span_scores)
        output_dict = {'srl_logits': srl_logits, 'constit_logits': constit_logits, 'mask': text_mask}
        tags = tags.view(batch_size, (- 1), self.max_span_width)
        constituents = constituents.view(batch_size, (- 1), self.max_span_width)
        # Decoding + metrics are skipped in fast training mode.
        if ((not self.training) or (self.training and (not self.fast_mode))):
            (srl_prediction, srl_probabilities) = self.semi_crf.viterbi_tags(srl_logits, text_mask)
            output_dict['srl_tags'] = srl_prediction
            output_dict['srl_tag_probabilities'] = srl_probabilities
            self.metrics['srl'](predictions=srl_prediction.view(batch_size, (- 1), self.max_span_width), gold_labels=tags, mask=text_mask)
            reshaped_constit_logits = constit_logits.view((- 1), self.num_constit_tags)
            constit_probabilities = F.softmax(reshaped_constit_logits, dim=(- 1))
            constit_predictions = constit_probabilities.max((- 1))[1]
            output_dict['constit_tags'] = constit_predictions
            output_dict['constit_probabilities'] = constit_probabilities
            constit_predictions = constit_predictions.view(batch_size, (- 1), self.max_span_width)
            self.metrics['constituents'](predictions=constit_predictions, gold_labels=constituents, mask=text_mask)
        if (self.training or ((not self.training) and (not self.fast_mode))):
            if (tags is not None):
                # CRF returns log-likelihood; loss is its negation.
                (srl_log_likelihood, _) = self.semi_crf(srl_logits, tags, mask=text_mask)
                output_dict['srl_loss'] = (- srl_log_likelihood)
            if (constituents is not None):
                constituents = constituents.view(batch_size, (- 1))
                constit_loss = util.sequence_cross_entropy_with_logits(constit_logits, constituents, span_mask)
                output_dict['constit_loss'] = constit_loss
            if ((tags is not None) and (constituents is not None)):
                # Scaffold loss is mixed in only after the cutoff batch.
                if (self.batch > self.cutoff_batch):
                    output_dict['loss'] = ((- srl_log_likelihood) + (self.mixing_ratio * constit_loss))
                else:
                    output_dict['loss'] = (- srl_log_likelihood)
        if (self.fast_mode and (not self.training)):
            # Dummy loss so evaluation in fast mode still returns a value.
            output_dict['loss'] = Variable(torch.FloatTensor([0.0]))
        return output_dict
    def decode(self, output_dict: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
        # Decoding to human-readable output is intentionally unsupported here.
        raise NotImplementedError
    def get_metrics(self, reset: bool=False):
        """Return overall P/R/F1 for both tasks; constituent metrics are
        abbreviated (c-prec/c-rec/c-f1) to keep log lines short."""
        short = {'precision-overall': 'c-prec', 'recall-overall': 'c-rec', 'f1-measure-overall': 'c-f1'}
        metric_dict = {}
        for task in self.metrics:
            task_metric_dict = self.metrics[task].get_metric(reset=reset)
            for (x, y) in task_metric_dict.items():
                if ('overall' in x):
                    if (task == 'constituents'):
                        metric_dict[short[x]] = y
                    else:
                        metric_dict[x] = y
        return metric_dict
    # NOTE(review): takes `cls` but carries no @classmethod decorator — almost
    # certainly stripped during extraction (cf. the bare decorator residue
    # above the class). Restore @classmethod before use.
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'PropBankScaffoldSpanSrl':
        embedder_params = params.pop('text_field_embedder')
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        stacked_encoder = Seq2SeqEncoder.from_params(params.pop('stacked_encoder'))
        span_feedforward = FeedForward.from_params(params.pop('span_feedforward'))
        binary_feature_dim = params.pop('binary_feature_dim')
        max_span_width = params.pop('max_span_width')
        binary_feature_size = params.pop('feature_size')
        distance_feature_size = params.pop('distance_feature_size', 5)
        fast_mode = params.pop('fast_mode', True)
        loss_type = params.pop('loss_type', 'hamming')
        mixing_ratio = params.pop('mixing_ratio', 1.0)
        cutoff_epoch = params.pop('cutoff_epoch', (- 1))
        label_namespace = params.pop('label_namespace', 'labels')
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        return cls(vocab=vocab, text_field_embedder=text_field_embedder, stacked_encoder=stacked_encoder, binary_feature_dim=binary_feature_dim, span_feedforward=span_feedforward, max_span_width=max_span_width, binary_feature_size=binary_feature_size, distance_feature_size=distance_feature_size, srl_label_namespace=label_namespace, loss_type=loss_type, mixing_ratio=mixing_ratio, cutoff_epoch=cutoff_epoch, fast_mode=fast_mode, initializer=initializer, regularizer=regularizer)
class RobertaForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when PyTorch is not installed.

    Instantiation fails via `requires_backends`, producing an instructive
    error telling the user to install torch (transformers dummy-object pattern)."""
    # Backends required for the real implementation to be importable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class FederatedFastEstimator():
    """Wraps a FastEstimator estimator to run a simulated federated-learning
    experiment through OpenFL: the dataset is sharded across the plan's
    collaborators, each gets its own task runner, and rounds alternate
    train/aggregate until the plan's round budget is exhausted."""
    def __init__(self, estimator, override_config: dict=None, **kwargs):
        self.estimator = estimator
        self.logger = getLogger(__name__)
        # Initialize the OpenFL workspace; extra kwargs are forwarded to fx.init.
        fx.init(**kwargs)
        if override_config:
            fx.update_plan(override_config)
    def fit(self):
        """Run the federated simulation and return the last trained model."""
        # Imported lazily so importing this module does not require fastestimator.
        import fastestimator as fe
        from fastestimator.trace.io.best_model_saver import BestModelSaver
        from sys import path
        file = Path(__file__).resolve()
        root = file.parent.resolve()
        work = Path.cwd().resolve()
        # Make both this package and the working directory importable by the plan.
        path.append(str(root))
        path.insert(0, str(work))
        plan_config = ((Path(fx.WORKSPACE_PREFIX) / 'plan') / 'plan.yaml')
        cols_config = ((Path(fx.WORKSPACE_PREFIX) / 'plan') / 'cols.yaml')
        data_config = ((Path(fx.WORKSPACE_PREFIX) / 'plan') / 'data.yaml')
        plan = Plan.parse(plan_config_path=plan_config, cols_config_path=cols_config, data_config_path=data_config)
        self.rounds = plan.config['aggregator']['settings']['rounds_to_train']
        data_loader = FastEstimatorDataLoader(self.estimator.pipeline)
        runner = FastEstimatorTaskRunner(self.estimator, data_loader=data_loader)
        tensor_pipe = plan.get_tensor_pipe()
        # Serialize the initial (round 0) weights for the aggregator.
        init_state_path = plan.config['aggregator']['settings']['init_state_path']
        (tensor_dict, holdout_params) = split_tensor_dict_for_holdouts(self.logger, runner.get_tensor_dict(False))
        model_snap = utils.construct_model_proto(tensor_dict=tensor_dict, round_number=0, tensor_pipe=tensor_pipe)
        self.logger.info(f'Creating Initial Weights File {init_state_path}')
        utils.dump_proto(model_proto=model_snap, fpath=init_state_path)
        self.logger.info('Starting Experiment...')
        aggregator = plan.get_aggregator()
        # Per-collaborator bookkeeping: last weights, runner, checkpoint dir.
        model_states = {collaborator: None for collaborator in plan.authorized_cols}
        runners = {}
        save_dir = {}
        # 1-based shard index into the dataset split.
        data_path = 1
        for col in plan.authorized_cols:
            data = self.estimator.pipeline.data
            (train_data, eval_data, test_data) = split_data(data['train'], data['eval'], data['test'], data_path, len(plan.authorized_cols))
            # Clone the pipeline config but swap in this collaborator's shard.
            pipeline_kwargs = {}
            for (k, v) in self.estimator.pipeline.__dict__.items():
                if (k in ['batch_size', 'ops', 'num_process', 'drop_last', 'pad_value', 'collate_fn']):
                    pipeline_kwargs[k] = v
            pipeline_kwargs.update({'train_data': train_data, 'eval_data': eval_data, 'test_data': test_data})
            pipeline = fe.Pipeline(**pipeline_kwargs)
            data_loader = FastEstimatorDataLoader(pipeline)
            self.estimator.system.pipeline = pipeline
            runners[col] = FastEstimatorTaskRunner(estimator=self.estimator, data_loader=data_loader)
            runners[col].set_optimizer_treatment('CONTINUE_LOCAL')
            # Give each collaborator its own best-model checkpoint directory.
            for trace in runners[col].estimator.system.traces:
                if isinstance(trace, BestModelSaver):
                    save_dir_path = f'{trace.save_dir}/{col}'
                    os.makedirs(save_dir_path, exist_ok=True)
                    save_dir[col] = save_dir_path
            data_path += 1
        collaborators = {collaborator: fx.create_collaborator(plan, collaborator, runners[collaborator], aggregator) for collaborator in plan.authorized_cols}
        model = None
        for round_num in range(self.rounds):
            for col in plan.authorized_cols:
                collaborator = collaborators[col]
                # Restore this collaborator's state from the previous round.
                if (round_num != 0):
                    runners[col].estimator.system.load_state(f'save/{col}_state')
                    runners[col].rebuild_model(round_num, model_states[col])
                    for trace in runners[col].estimator.system.traces:
                        if isinstance(trace, BestModelSaver):
                            trace.save_dir = save_dir[col]
                collaborator.run_simulation()
                # Snapshot weights (incl. optimizer) and persist system state.
                model_states[col] = runners[col].get_tensor_dict(with_opt_vars=True)
                model = runners[col].model
                runners[col].estimator.system.save_state(f'save/{col}_state')
        # Model from the last collaborator of the last round.
        return model
class CGP(object):
    """Cartesian Genetic Programming driver using a (1 + lambda) evolution
    strategy: one parent plus `lam` mutated offspring per generation, with the
    best individual replacing the parent only on strict improvement."""
    def __init__(self, net_info, eval_func, lam=4, imgSize=32, init=False):
        # Population slot 0 is the parent; slots 1..lam are offspring.
        self.lam = lam
        self.pop = [Individual(net_info, init) for _ in range((1 + self.lam))]
        self.eval_func = eval_func
        self.num_gen = 0
        self.num_eval = 0
        # Pooling limit so feature maps never shrink below 4x4.
        self.max_pool_num = int((math.log2(imgSize) - 2))
        self.init = init
    def _evaluation(self, pop, eval_flag):
        """Evaluate only flagged individuals via eval_func; return all fitnesses."""
        net_lists = []
        active_index = np.where(eval_flag)[0]
        for i in active_index:
            net_lists.append(pop[i].active_net_list())
        fp = self.eval_func(net_lists)
        for (i, j) in enumerate(active_index):
            pop[j].eval = fp[i]
        evaluations = np.zeros(len(pop))
        for i in range(len(pop)):
            evaluations[i] = pop[i].eval
        self.num_eval += len(net_lists)
        return evaluations
    def _log_data(self, net_info_type='active_only', start_time=0):
        """One CSV log row for the parent: counters, elapsed time, fitness,
        active-node count, then either the active net list or the full genome."""
        log_list = [self.num_gen, self.num_eval, (time.time() - start_time), self.pop[0].eval, self.pop[0].count_active_node()]
        if (net_info_type == 'active_only'):
            log_list.append(self.pop[0].active_net_list())
        elif (net_info_type == 'full'):
            log_list += self.pop[0].gene.flatten().tolist()
        else:
            pass
        return log_list
    def _log_data_children(self, net_info_type='active_only', start_time=0, pop=None):
        """Same as _log_data but for an arbitrary individual `pop`."""
        log_list = [self.num_gen, self.num_eval, (time.time() - start_time), pop.eval, pop.count_active_node()]
        if (net_info_type == 'active_only'):
            log_list.append(pop.active_net_list())
        elif (net_info_type == 'full'):
            log_list += pop.gene.flatten().tolist()
        else:
            pass
        return log_list
    def load_log(self, log_data):
        """Restore parent state from a 'full'-format log row.

        NOTE(review): assumes log_data[5:] holds the flattened genome written
        by _log_data(net_info_type='full') — verify the row format matches."""
        self.num_gen = log_data[0]
        self.num_eval = log_data[1]
        net_info = self.pop[0].net_info
        self.pop[0].eval = log_data[3]
        self.pop[0].gene = np.array(log_data[5:]).reshape(((net_info.node_num + net_info.out_num), (net_info.max_in_num + 1)))
        self.pop[0].check_active()
    def modified_evolution(self, max_eval=100, mutation_rate=0.01, log_file='./log.txt', arch_file='./arch.txt'):
        """Run the (1 + lambda) loop for `max_eval` generations, logging the
        parent and every child each generation to four CSV files."""
        with open(log_file, 'w') as fw, open(arch_file, 'w') as fw_a, open('child.txt', 'w') as fw_c, open('arch_child.txt', 'w') as fw_ac:
            writer = csv.writer(fw, lineterminator='\n')
            writer_a = csv.writer(fw_a, lineterminator='\n')
            writer_c = csv.writer(fw_c, lineterminator='\n')
            writer_ac = csv.writer(fw_ac, lineterminator='\n')
            start_time = time.time()
            eval_flag = np.empty(self.lam)
            active_num = self.pop[0].count_active_node()
            if self.init:
                pass
            else:
                # Force-mutate the parent until its active-node count is in range.
                while ((active_num < self.pop[0].net_info.min_active_num) or (active_num > self.pop[0].net_info.max_active_num)):
                    self.pop[0].mutation(1.0)
                    active_num = self.pop[0].count_active_node()
            self._evaluation([self.pop[0]], np.array([True]))
            print(self._log_data(net_info_type='active_only', start_time=start_time))
            while (self.num_gen < max_eval):
                self.num_gen += 1
                # Produce lam children; re-mutate until the mutation changed an
                # active gene AND the child satisfies the active-node bounds.
                for i in range(self.lam):
                    eval_flag[i] = False
                    self.pop[(i + 1)].copy(self.pop[0])
                    active_num = self.pop[(i + 1)].count_active_node()
                    while ((not eval_flag[i]) or (active_num < self.pop[(i + 1)].net_info.min_active_num) or (active_num > self.pop[(i + 1)].net_info.max_active_num)):
                        self.pop[(i + 1)].copy(self.pop[0])
                        eval_flag[i] = self.pop[(i + 1)].mutation(mutation_rate)
                        active_num = self.pop[(i + 1)].count_active_node()
                evaluations = self._evaluation(self.pop[1:], eval_flag=eval_flag)
                best_arg = evaluations.argmax()
                for c in range((1 + self.lam)):
                    writer_c.writerow(self._log_data_children(net_info_type='full', start_time=start_time, pop=self.pop[c]))
                    writer_ac.writerow(self._log_data_children(net_info_type='active_only', start_time=start_time, pop=self.pop[c]))
                # Replace the parent only on strict improvement; otherwise apply
                # a neutral mutation (changes inactive genes only) to keep drift.
                if (evaluations[best_arg] > self.pop[0].eval):
                    self.pop[0].copy(self.pop[(best_arg + 1)])
                else:
                    self.pop[0].neutral_mutation(mutation_rate)
                print(self._log_data(net_info_type='active_only', start_time=start_time))
                writer.writerow(self._log_data(net_info_type='full', start_time=start_time))
                writer_a.writerow(self._log_data(net_info_type='active_only', start_time=start_time))
def _and_then(t1, t2, ctx=None):
    """Return a tactic that applies `t1` and then `t2` to every resulting subgoal.

    Both arguments are coerced to tactics; in debug builds the two tactics must
    share the same Z3 context.
    """
    first = _to_tactic(t1, ctx)
    second = _to_tactic(t2, ctx)
    if z3_debug():
        _z3_assert(first.ctx == second.ctx, 'Context mismatch')
    combined = Z3_tactic_and_then(first.ctx.ref(), first.tactic, second.tactic)
    return Tactic(combined, first.ctx)
# _class  # NOTE(review): stripped decorator residue (bare name raises
# NameError); likely a persistence/registration class decorator — restore
# before use.
class VELoss():
def __init__(self, sigma_min=0.02, sigma_max=100, warmup_ite=None):
self.sigma_min = sigma_min
self.sigma_max = sigma_max
self.warmup_ite = warmup_ite
self.clamp_cur = 5.0
self.clamp_max = 500.0
if self.warmup_ite:
self.warmup_step = np.exp((np.log(100) / self.warmup_ite))
def __call__(self, net, images, labels, augment_pipe=None):
rnd_uniform = torch.rand([images.shape[0], 1, 1, 1], device=images.device)
sigma = (self.sigma_min * ((self.sigma_max / self.sigma_min) ** rnd_uniform))
weight = (1 / (sigma ** 2))
if self.warmup_ite:
if (self.clamp_cur < self.clamp_max):
weight.clamp_max_(self.clamp_cur)
self.clamp_cur *= self.warmup_step
(y, augment_labels) = (augment_pipe(images) if (augment_pipe is not None) else (images, None))
n = (torch.randn_like(y) * sigma)
D_yn = net((y + n), sigma, labels, augment_labels=augment_labels)
loss = (weight * ((D_yn - y) ** 2))
return loss |
def one_direction_rnn(tensor_rep, mask_rep, hn, cell_type, only_final=False, wd=0.0, keep_prob=1.0, is_train=None, is_forward=True, scope=None):
    """Run a single-direction RNN (TF1 / tf.contrib) over a masked sequence.

    Args:
        tensor_rep: [batch, time, dim] input tensor.
        mask_rep: boolean mask; True marks valid timesteps.
        hn: hidden size of the RNN cell.
        cell_type: one of 'gru' | 'lstm' | 'basic_lstm' | 'basic_rnn'.
        only_final: if True, return only the last valid state per sequence.
        wd: weight-decay coefficient; > 0 registers L2 regularization.
        keep_prob: dropout keep probability (applied only when is_train).
        is_train: bool tensor toggling dropout.
        is_forward: direction flag — see NOTE below.
        scope: optional variable-scope name.

    Returns:
        [batch, time, hn] outputs, or [batch, hn] final states if only_final.
    """
    # NOTE(review): this assert restricts the function to the backward
    # direction only, and the scope expression always renders 'forward_rnn'
    # for the (unreachable) forward branch — both look like leftovers from an
    # edit; confirm intended behavior before reuse.
    assert (not is_forward)
    with tf.variable_scope(((scope or ('%s_rnn' % 'forward')) if is_forward else 'backward')):
        # Reuse variables if the enclosing scope is already in reuse mode.
        reuse = (None if (not tf.get_variable_scope().reuse) else True)
        if (cell_type == 'gru'):
            cell = tf.contrib.rnn.GRUCell(hn, reuse=reuse)
        elif (cell_type == 'lstm'):
            cell = tf.contrib.rnn.LSTMCell(hn, reuse=reuse)
        elif (cell_type == 'basic_lstm'):
            cell = tf.contrib.rnn.BasicLSTMCell(hn, reuse=reuse)
        elif (cell_type == 'basic_rnn'):
            cell = tf.contrib.rnn.BasicRNNCell(hn, reuse=reuse)
        else:
            raise AttributeError(("no cell type '%s'" % cell_type))
        cell_dp = SwitchableDropoutWrapper(cell, is_train, keep_prob)
        # Per-example valid lengths derived from the boolean mask.
        tensor_len = tf.reduce_sum(tf.cast(mask_rep, tf.int32), (- 1))
        (rnn_outputs, _) = dynamic_rnn(cell_dp, tensor_rep, tensor_len, dtype=tf.float32)
        if (wd > 0):
            add_reg_without_bias()
        if (not only_final):
            return rnn_outputs
        else:
            return get_last_state(rnn_outputs, mask_rep)
def r_repeat(t):
    """Build the interpreter node for a `repeat <count> <stmt>` construct.

    `t[1]` is a thunk yielding the repetition count and `t[3]` the compiled
    body statement. The returned closure threads (world, call-count, success)
    and aborts unsuccessfully once MAX_FUNC_CALL is exceeded.
    """
    count_fn, body_fn = t[1], t[3]

    def run(world, calls):
        # Budget guard: give up (unsuccessfully) when too many calls were made.
        if calls > MAX_FUNC_CALL:
            return world, calls, False
        calls += 1
        ok = True
        for _ in range(count_fn()):
            world, calls, ok = body_fn(world, calls)
            if not ok:
                # Propagate the first failure immediately.
                return world, calls, ok
        return world, calls, ok

    return [('repeat_stmt', run)]
class ProbeObj(ctypes.c_void_p):
    """Opaque pointer wrapper for a probe handle passed to C APIs."""

    def __init__(self, probe):
        # ctypes marshals whatever _as_parameter_ holds when this object is
        # used directly as a foreign-function argument.
        self._as_parameter_ = probe

    def from_param(obj):
        # Deliberately a plain function (no self/cls): ctypes looks it up on
        # the class and calls it with the value being converted, which is
        # passed through unchanged.
        return obj
def saveVocabulary(name, vocab, file):
    """Write `vocab` to `file`, announcing the save on stdout.

    Args:
        name: human-readable vocabulary name used in the log line.
        vocab: object exposing writeFile(path).
        file: destination path.
    """
    print(f"Saving {name} vocabulary to '{file}'...")
    vocab.writeFile(file)
# (repr=False)  # NOTE(review): stripped decorator residue (syntax error as
# written); likely @dataclass(repr=False) on the class below — restore before use.
class GraphQLCase(Case):
    """A generated test case for a GraphQL operation.

    Specializes request building: the query is sent as JSON under the
    ``query`` key, unless the body is already serialized bytes.
    """

    def as_requests_kwargs(self, base_url: str | None = None, headers: dict[str, str] | None = None) -> dict[str, Any]:
        merged_headers = self._get_headers(headers)
        resolved_url = self._get_base_url(base_url)
        kwargs: dict[str, Any] = {'method': self.method, 'url': resolved_url, 'headers': merged_headers}
        if not isinstance(self.body, bytes):
            kwargs['json'] = {'query': self.body}
        else:
            # Pre-serialized payload: send raw bytes and default the content type.
            kwargs['data'] = self.body
            kwargs['headers'].setdefault('Content-Type', 'application/json')
        return kwargs

    def as_werkzeug_kwargs(self, headers: dict[str, str] | None = None) -> dict[str, Any]:
        merged_headers = self._get_headers(headers)
        return {
            'method': self.method,
            'path': self.operation.schema.get_full_path(self.formatted_path),
            'headers': dict(merged_headers),
            'query_string': self.query,
            'json': {'query': self.body},
        }

    def validate_response(self, response: GenericResponse, checks: tuple[CheckFunction, ...] = (), additional_checks: tuple[CheckFunction, ...] = (), excluded_checks: tuple[CheckFunction, ...] = (), code_sample_style: str | None = None) -> None:
        # Default to the server-error check, merge in extras, drop exclusions.
        if not checks:
            checks = (not_a_server_error,)
        active = tuple(check for check in checks + additional_checks if check not in excluded_checks)
        return super().validate_response(response, active, code_sample_style=code_sample_style)

    def call_asgi(self, app: Any = None, base_url: str | None = None, headers: dict[str, str] | None = None, **kwargs: Any) -> requests.Response:
        # Plain passthrough; GraphQL needs no ASGI-specific handling.
        return super().call_asgi(app=app, base_url=base_url, headers=headers, **kwargs)
def variableFromSentence(lang, sentence):
    """Encode a sentence as a (seq_len, 1) LongTensor Variable of token ids.

    Appends the EOS token and moves the tensor to GPU when `use_cuda` is set.
    """
    token_ids = indexesFromSentence(lang, sentence)
    token_ids.append(EOS_token)
    # Column vector shape (len+1, 1), as the seq2seq encoder expects.
    var = Variable(torch.LongTensor(token_ids).view(-1, 1))
    return var.cuda() if use_cuda else var
def mutual_info(prob):
    """Mutual information of a 2-D joint probability table.

    Computes sum_ij p_ij * log(p_ij / (p_i. * p_.j)), with EPS added to both
    the denominator and the log argument for numerical stability.
    """
    col_marginal = np.sum(prob, axis=0, keepdims=True)  # shape (1, K)
    row_marginal = np.sum(prob, axis=1, keepdims=True)  # shape (K, 1)
    independent = col_marginal * row_marginal
    return np.sum(prob * np.log(prob / (independent + EPS) + EPS))
def init_process(backend='nccl'):
    """Join the default torch.distributed process group and sync all ranks.

    Uses the rank/world-size recorded in `ptu`; after the barrier, stdout is
    silenced on every rank except 0.
    """
    rank = ptu.dist_rank
    print(f'Starting process with rank {rank}...', flush=True)
    dist.init_process_group(backend, rank=rank, world_size=ptu.world_size)
    print(f'Process {rank} is connected.', flush=True)
    dist.barrier()
    # Keep logs readable: only rank 0 keeps printing from here on.
    silence_print(rank == 0)
    if rank == 0:
        print('All processes are connected.', flush=True)
def add_params(size, name=''):
    """Create a trainable Parameter initialized uniformly in [-0.1, 0.1].

    Args:
        size: sequence of dimension sizes; values are coerced to int, so
            float sizes (e.g. from numpy) are accepted.
        name: label used only in the diagnostic printout.

    Returns:
        torch.nn.Parameter of shape `tuple(int(s) for s in size)` with values
        drawn from U(-0.1, 0.1).
    """
    # f-strings replace the original string concatenation; output is identical.
    if len(size) == 1:
        print(f'vector {name}: {size[0]}; uniform in [-0.1, 0.1]')
    else:
        # Only the first two dims are reported, even for higher-rank shapes.
        print(f'matrix {name}: {size[0]} x {size[1]}; uniform in [-0.1, 0.1]')
    shape = tuple(int(dim) for dim in size)
    return torch.nn.Parameter(torch.empty(shape).uniform_(-0.1, 0.1))
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
    # Identity override: returns the mask unchanged, bypassing the causal-mask
    # construction a decoder normally performs in this hook. `input_shape`,
    # `inputs_embeds` and `past_key_values_length` exist only to satisfy the
    # expected signature — presumably for a model patched to use a custom
    # (e.g. bidirectional) mask upstream; confirm against the caller.
    return attention_mask
def setup_very_basic_config(color=True):
    """Configure root logging to stdout at INFO level.

    Args:
        color: when True, use the project's ColorfulFormatter with a green
            timestamp/name prefix; otherwise use a plain timestamped format.
    """
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(logging.INFO)
    if not color:
        handler.setFormatter(logging.Formatter('%(asctime)s | %(levelname)s | %(name)s : %(message)s', datefmt='%Y-%m-%dT%H:%M:%S'))
    else:
        # Colorize only the prefix; the message text itself stays uncolored.
        colored_fmt = colored('%(asctime)s | %(name)s: ', 'green') + '%(message)s'
        handler.setFormatter(ColorfulFormatter(colored_fmt, datefmt='%Y-%m-%dT%H:%M:%S'))
    logging.basicConfig(level=logging.INFO, handlers=[handler])
def write_planar(img, planar_path):
    """Write an image to disk in planar layout: all of channel 0, then channel 1, ...

    Args:
        img: iterable of 2-D numpy channel arrays; each channel is serialized
            row-major in its native dtype byte representation.
        planar_path: destination file path (binary write mode).
    """
    # `with` guarantees the file is closed even if a write fails — the
    # original leaked the handle on error.
    with open(planar_path, 'wb') as planar_file:
        for cha in img:
            # One bulk write per channel. tobytes() serializes in C (row-major)
            # order, byte-identical to writing each cha[ih, iw] scalar in turn,
            # but without the per-pixel Python loop.
            planar_file.write(cha.tobytes())
def update_flags(flags):
    """Normalize parsed CLI flags in place and return them.

    Widths default to the matching heights, batch size is forced to 1, and the
    run's checkpoint/log/test-output directories are derived from
    `outputsroot/name`.
    """
    # Square defaults: a missing width falls back to the corresponding height.
    if flags.input_width is None:
        flags.input_width = flags.input_height
    if flags.output_width is None:
        flags.output_width = flags.output_height
    flags.batch_size = 1
    run_root = os.path.join(flags.outputsroot, flags.name)
    for attr, subdir in (('checkpoint_dir', 'checkpoints'), ('logdir', 'logs'), ('test_output_dir', 'test_output')):
        setattr(flags, attr, os.path.join(run_root, subdir))
    return flags
def test_reset(objectives):
    """reset() must restore a pristine archive: all goals uncovered, nothing
    covered, and no stored solutions."""
    cov_archive = CoverageArchive(objectives)
    cov_archive.reset()
    assert cov_archive.uncovered_goals == objectives
    assert cov_archive.covered_goals == OrderedSet()
    assert cov_archive.solutions == OrderedSet()
class FreezeWeights(Layer):
    """Pass-through layer that blocks gradients from flowing into the encoder."""

    def call(self, inputs):
        # stop_gradient turns the encoder output into a constant w.r.t.
        # backprop, effectively freezing everything upstream of it.
        detached = K.stop_gradient(inputs['encoder_output'])
        inputs['encoder_output'] = detached
        return inputs
return inputs |
# Page-scrape residue (not part of the source code):
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.