code stringlengths 101 5.91M |
|---|
class GenericExperiment():
    """Named experiment wrapping a data-generating `run_method`.

    The run method's extra keyword parameters (beyond the standard run
    arguments handled by `run` itself) are discovered via `extract_params`
    and exposed through `get_parameters`.
    """

    def __init__(self, name, description, run_method):
        self.name = name
        self.description = description
        self.run_method = run_method
        # Arguments consumed by run() directly; everything else in
        # run_method's signature is treated as an experiment parameter.
        run_args = ['num_samples', 'seed', 'parallelize', 'show_progress']
        self.params = extract_params(run_method, run_args)

    def get_parameters(self):
        """Return the parameters extracted from the run method's signature."""
        return self.params

    def run(self, num_samples, seed, parallelize=True, show_progress=False, **parameter_args):
        """Execute the experiment and package the results as a Dataset.

        `parameter_args` are projected onto the known experiment parameters
        before invoking the underlying run method.
        """
        run_parameters = self.params.project(parameter_args)
        results = self.run_method(num_samples=num_samples, seed=seed, show_progress=show_progress, parallelize=parallelize, **run_parameters)
        # run_method returns ((covariates, treatment, outcome), ground_truth).
        ((covariates, treatment, outcome), ground_truth) = results
        return Dataset(covariates=covariates, treatments=treatment, outcomes=outcome, true_effects=ground_truth)
class TokenClassificationFields(Preprocessing):
    """Column-name configuration for token-classification preprocessing."""
    # Dataset column holding the token sequences.
    tokens: str = 'tokens'
    # Dataset column holding the per-token labels.
    labels: str = 'labels'
class BasicBlock(nn.Module):
    """Two-layer 3x3-conv residual block (ResNet style).

    When `residual` is True the (optionally downsampled) input is added
    back before the final activation; otherwise the block degenerates to a
    plain conv-bn-relu-conv-bn stack followed by a ReLU.
    """

    def __init__(self, inplanes, planes, stride, downsample, residual=True):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.residual = residual

    def forward(self, x):
        shortcut = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.residual:
            # Match the shortcut's shape to the main branch if needed.
            if self.downsample is not None:
                shortcut = self.downsample(shortcut)
            out += shortcut
        return self.relu(out)
@dataclass
class ScenarioConfig():
    """Configuration for a SUMO-based driving scenario (map, traffic, ego).

    NOTE(review): the original had no @dataclass decorator even though it
    uses `field(default_factory=...)` (meaningless outside a dataclass) --
    the decorator was almost certainly stripped and is restored here.
    """
    # Scenario identity and task list.
    name: str = 'intersection'
    kwargs: Dict[(str, Any)] = field(default_factory=(lambda : {}))
    tasks: List[str] = field(default_factory=(lambda : []))
    # SUMO input file paths (network, routes, additionals).
    net_path: str = ''
    route_path: str = ''
    add_path: str = ''
    seed_offset: int = 0
    seeding_mode: str = 'train'
    # NOTE(review): these three fields had no default values in the
    # original (a syntax error); 1 is a conservative placeholder -- confirm
    # the intended defaults against the project's configs.
    num_maps: int = 1
    num_traffic: int = 1
    num_tasks: int = 1
    # Vehicle-type behavior distribution generation.
    behavior_dist: bool = False
    behavior_dist_num: int = 100
    behavior_dist_path: str = abspath(join(dirname(__file__), '..', 'data', 'vType_distribution', 'EIDM.txt'))
    vTypeDistribution_py_path: str = abspath(join(dirname(__file__), '..', '..', 'tools', 'createVehTypeDistribution.py'))
    # Ego-vehicle initialization.
    ego_init: bool = False
    ego_init_freeze: float = 0.0
    ego_from_edges: Optional[Tuple[str]] = None
    ego_to_edges: Optional[Tuple[str]] = None
    ego_init_position: Union[(str, float)] = '5.0'
    ego_init_lane: Union[(str, float)] = 'free'
    ego_init_speed: Union[(str, float)] = 'random'
    # Background-traffic initialization and spawning.
    traffic_init: bool = True
    traffic_init_edges: Optional[Tuple[str]] = None
    traffic_init_edges_exclude: Optional[Tuple[str]] = None
    traffic_init_edges_exclude_ego: bool = True
    traffic_init_spread: float = 10.0
    traffic_init_params: Tuple[(str, str, str)] = ('random_free', 'best', 'random')
    traffic_spawn: bool = True
    traffic_spawn_edges: Optional[Tuple[str]] = None
    traffic_spawn_edges_exclude: Tuple[str] = tuple()
    traffic_spawn_params: Tuple[(str, str, str)] = ('base', 'free', 'random')
    traffic_spawn_period: float = 1.0
    traffic_vTypes: Optional[Tuple[str]] = None
    traffic_scale: Tuple[(float, float)] = tuple([0.4, 0.8])
    ego_vType: Optional[str] = None
    # Scenario-generation worker settings.
    generation_threading: bool = True
    generation_num_threads: int = 4
    generation_num_buffer: int = 20
@pytest.mark.parametrize('backend', ['pydub'])
@pytest.mark.parametrize('channel_first', [False, True])
@pytest.mark.parametrize('audio', audios)
@pytest.mark.parametrize('source_type', ['string', 'binaryFileHandler', 'BytesIO', 'StringIO', 'strFileHandler'])
def test_ausave_and_auread(tmpdir, backend, channel_first, audio, source_type):
    """Round-trip a waveform through ausave/auread for several source types.

    Binary sources must reproduce shape, dtype and values; text-mode
    sources (StringIO / 'r'-mode file handles) must raise ValueError.

    NOTE(review): the parametrize decorator lines were garbled in the
    original ('.parametrize(...)' with no '@pytest.mark' prefix, a syntax
    error); restored to the conventional form.
    """
    _change_backend(backend)
    tmpdir.ensure(dir=True)
    tmppath = tmpdir.join('tmp.wav')
    audio_file_path = tmppath.strpath
    if channel_first:
        # Fixtures are (samples, channels); flip axes for channel_first.
        audio = audio.transpose((1, 0))

    def save_audio_function(audio_file_path):
        audio_utils.ausave(audio_file_path, audio, channel_first=channel_first)

    if check_save_condition(backend, audio):
        save_audio_function(audio_file_path)
    else:
        # Unsupported (backend, dtype/shape) combinations must raise.
        with pytest.raises(ValueError):
            save_audio_function(audio_file_path)
        return True

    def read_audio_function(source):
        return audio_utils.auread(source, channel_first=channel_first)

    if (source_type == 'string'):
        read_audio = read_audio_function(audio_file_path)
    elif (source_type == 'binaryFileHandler'):
        with open(audio_file_path, 'rb') as f:
            read_audio = read_audio_function(f)
    elif (source_type == 'BytesIO'):
        with open(audio_file_path, 'rb') as f:
            read_audio = read_audio_function(BytesIO(f.read()))
    elif (source_type == 'StringIO'):
        with pytest.raises(ValueError):
            read_audio = read_audio_function(StringIO(audio_file_path))
        return True
    elif (source_type == 'strFileHandler'):
        with pytest.raises(ValueError):
            with open(audio_file_path, 'r') as f:
                read_audio = read_audio_function(f)
        return True
    logger.info(read_audio.shape)
    assert (read_audio.shape == audio.shape)
    assert (read_audio.dtype == audio.dtype)
    assert_allclose(audio, read_audio)
@pytest.mark.pure
def test_reshape_add():
    """ONNXReshape followed by a broadcast add, lowered with StateFusion.

    NOTE(review): the marker line was garbled in the original ('.pure'),
    and `add_reshape.to_sdfg(...)` requires the @dace.program decorator,
    which appears to have been stripped; both restored -- confirm against
    the upstream test file.
    """
    @dace.program
    def add_reshape(inp: dace.float64[9], bias: dace.float64[3], target_shape: dace.int64[2]):
        reshaped = dace.define_local([3, 3], dace.float64)
        donnx.ONNXReshape(data=inp, shape=target_shape, reshaped=reshaped)
        return (reshaped + bias)
    sdfg: dace.SDFG = add_reshape.to_sdfg(simplify=False)
    sdfg.apply_transformations_repeated([transformation.interstate.StateFusion])
    inp = np.arange(9).astype(np.float64)
    bias = np.arange(3).astype(np.float64)
    result = sdfg(inp=inp.copy(), bias=bias.copy(), target_shape=np.array([3, 3]).astype(np.int64))
    assert_allclose(result, (inp.reshape(3, 3) + bias))
def mlp_actor_critic(x, a, hidden_sizes=(400, 300), activation=tf.nn.relu, output_activation=tf.tanh, action_space=None):
    """TD3-style actor-critic graph: tanh policy scaled to the action
    limit plus twin Q-networks, with q1 re-evaluated at the policy action.

    Returns (pi, q1, q2, q1_pi). Assumes a 1-D continuous action space;
    `action_space.high[0]` is used as a symmetric action limit.
    """
    act_dim = a.shape.as_list()[(- 1)]
    act_limit = action_space.high[0]
    with tf.variable_scope('pi'):
        # tanh output in [-1, 1], rescaled to the environment's limits.
        pi = (act_limit * mlp(x, (list(hidden_sizes) + [act_dim]), activation, output_activation))
    with tf.variable_scope('q1'):
        q1 = tf.squeeze(mlp(tf.concat([x, a], axis=(- 1)), (list(hidden_sizes) + [1]), activation, None), axis=1)
    with tf.variable_scope('q2'):
        q2 = tf.squeeze(mlp(tf.concat([x, a], axis=(- 1)), (list(hidden_sizes) + [1]), activation, None), axis=1)
    # Reuse q1's weights to score the policy's own action (actor loss term).
    with tf.variable_scope('q1', reuse=True):
        q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=(- 1)), (list(hidden_sizes) + [1]), activation, None), axis=1)
    return (pi, q1, q2, q1_pi)
class RedirectOut():
    """Context manager that redirects sys.stdout to the file at `out`.

    On exit the original stdout (captured at construction time) is
    restored and the file is closed, even if the body raised.
    """

    def __init__(self, out):
        super().__init__()
        self.out = out
        self.original = sys.stdout

    def __enter__(self):
        self.__fd = open(self.out, 'w')
        sys.stdout = self.__fd
        # Return self so `with RedirectOut(p) as r:` works (was missing).
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Restore stdout before closing so nothing writes to a closed file.
        sys.stdout = self.original
        self.__fd.close()
        return False  # never suppress exceptions
class AugustSmartLockGenerateTemporaryAccessCode(VirtualFunctionTool):
    """Virtual tool spec: generate a time-limited door access code."""
    name = 'AugustSmartLockGenerateTemporaryAccessCode'
    summary = 'Generates a temporary access code that can be used to unlock the door for a specified period of time.'
    # Both time bounds are required, formatted as 'YYYY-MM-DD HH:mm'.
    parameters: List[ArgParameter] = [{'name': 'start_time', 'type': 'string', 'description': 'Start time of the access period in YYYY-MM-DD HH:mm format.', 'required': True}, {'name': 'end_time', 'type': 'string', 'description': 'End time of the access period in YYYY-MM-DD HH:mm format.', 'required': True}]
    returns: List[ArgReturn] = [{'name': 'access_code', 'type': 'string', 'description': 'The generated temporary access code.'}]
    # Raised for malformed times or start_time after end_time.
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': 'The start_time and end_time are invalid or the start_time is later than the end_time.'}]
def main():
    """Train (and optionally evaluate) the retrieval model.

    Parses arguments from a single .json file or the command line, guards
    the output directory, configures logging and seeding, loads a
    json-lines training set, builds tokenizer/config/model, and drives
    CLTrainer. Returns the (possibly empty) evaluation results dict.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, OurTrainingArguments, RetrieverArguments))
    # `script.py args.json` means: read every argument from that file.
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args, bertscore_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args, bertscore_args) = parser.parse_args_into_dataclasses()
    # Refuse to overwrite an existing non-empty output dir unless asked to.
    if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty.Use --overwrite_output_dir to overcome.')
    # Verbose logging only on the main process of distributed runs.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f' distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    training_args.eval_file = data_args.eval_file
    # Only json-lines training data is supported.
    assert ('json' in data_args.train_file)
    data_files = {'train': data_args.train_file}
    datasets = load_dataset('json', data_files=data_files)
    config_kwargs = {'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    # Config resolution order: explicit config name > model path > scratch.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    assert model_args.model_name_or_path
    # CodeT5 needs the Roberta fast tokenizer with prefix spaces.
    if ('codet5' in model_args.model_name_or_path):
        tokenizer = transformers.RobertaTokenizerFast.from_pretrained(model_args.model_name_or_path, add_prefix_space=True)
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
    assert model_args.model_name_or_path
    model = RetrievalModel(config=config, model_type=model_args.model_name_or_path, num_layers=bertscore_args.num_layers, all_layers=bertscore_args.all_layers, idf=bertscore_args.idf, rescale_with_baseline=bertscore_args.rescale_with_baseline, baseline_path=bertscore_args.baseline_path, tokenizer=tokenizer, training_args=training_args, model_args=model_args)
    if bertscore_args.idf:
        raise NotImplementedError
    else:
        # Uniform token weights, with special tokens zeroed out.
        idf_dict = defaultdict((lambda : 1.0))
        idf_dict[tokenizer.sep_token_id] = 0
        idf_dict[tokenizer.cls_token_id] = 0

    def prepare_features(examples):
        # Tokenize text pairs; empty strings are replaced with a single
        # space so tokenization does not break downstream.
        total = len(examples['text1'])
        for idx in range(total):
            if (examples['text1'][idx] == ''):
                examples['text1'][idx] = ' '
            if (examples['text2'][idx] == ''):
                examples['text2'][idx] = ' '
        sentences = (examples['text1'] + examples['text2'])
        features = tok_sentences(tokenizer, sentences, has_hard_neg=False, total=total, max_length=data_args.max_seq_length)
        return features
    if training_args.do_train:
        train_dataset = datasets['train'].map(prepare_features, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    data_collator = OurDataCollatorWithPadding(tokenizer.pad_token_id, idf_dict)
    training_args.remove_unused_columns = False
    trainer = CLTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), tokenizer=tokenizer, data_collator=data_collator)
    trainer.model_args = model_args
    trainer.epoch_metric = {}
    trainer.metric_for_best_model = training_args.metric_for_best_model
    # Evaluation is handled explicitly below; disable trainer-side eval.
    training_args.do_eval = False
    if training_args.do_train:
        # Resume from model path only when it is a local checkpoint dir.
        model_path = (model_args.model_name_or_path if ((model_args.model_name_or_path is not None) and os.path.isdir(model_args.model_name_or_path)) else None)
        trainer.train(model_path=model_path)
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        results = trainer.evaluate(eval_senteval_transfer=True)
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for (key, value) in sorted(results.items()):
                    logger.info(f' {key} = {value}')
                    writer.write(f'''{key} = {value}
''')
    return results
class GaussianStrategy(ExplorationStrategy):
    """Exploration strategy that perturbs policy actions with Gaussian
    noise whose scale decays linearly from max_sigma to min_sigma over
    `decay_period` iterations. Noisy actions are clipped to the action
    space bounds.
    """

    def __init__(self, env_spec, max_sigma=1.0, min_sigma=0.1, decay_period=1000000):
        assert isinstance(env_spec.action_space, gym.spaces.Box)
        assert (len(env_spec.action_space.shape) == 1)
        self._max_sigma = max_sigma
        self._min_sigma = min_sigma
        self._decay_period = decay_period
        self._action_space = env_spec.action_space

    def _current_sigma(self, iteration):
        # Linear decay, saturating at min_sigma once decay_period is reached.
        progress = min(1.0, (iteration * 1.0) / self._decay_period)
        return self._max_sigma - (self._max_sigma - self._min_sigma) * progress

    def get_action(self, iteration, observation, policy, **kwargs):
        action, agent_info = policy.get_action(observation)
        noisy = action + np.random.normal(size=len(action)) * self._current_sigma(iteration)
        return (np.clip(noisy, self._action_space.low, self._action_space.high), agent_info)

    def get_actions(self, iteration, observations, policy, **kwargs):
        actions, agent_infos = policy.get_actions(observations)
        noisy = actions + np.random.normal(size=len(actions)) * self._current_sigma(iteration)
        return (np.clip(noisy, self._action_space.low, self._action_space.high), agent_infos)
def nes_op_ray_amplitude(ray, nes_op, velocity='interpolation'):
    """Estimate the ray amplitude along `ray` using a NES operator.

    Parameters:
        ray: array of ray points; the last axis is the spatial dimension.
        nes_op: operator providing Traveltime, Laplacian, Gradient and
            velocity evaluations, plus the source location `xs`.
        velocity: 'interpolation' to query the operator's velocity model
            directly; any other listed option derives speed from the
            traveltime gradient (|grad T| = 1/v).

    Returns the scalar amplitude at the end of the ray.
    """
    assert (velocity in velocities_list), ("Two options are supported for velocity evaluation: 'interpolation' and " + "'learned velocity'.")
    dims = np.shape(ray)[(- 1)]
    ray_times = np.squeeze(nes_op.Traveltime(ray))
    laplacians = np.squeeze(nes_op.Laplacian(ray))
    if (velocity == 'interpolation'):
        ray_vels = np.squeeze(nes_op.velocity(ray))
    else:
        # Speed from the eikonal relation: v = 1 / |grad T|.
        ray_vels = np.squeeze((1 / np.sqrt(np.sum((nes_op.Gradient(ray) ** 2), axis=(- 1)))))
    start_vel = np.squeeze(nes_op.velocity(nes_op.xs))
    # Initial amplitude near the source: geometric spreading with distance
    # to the first ray point, scaled by sqrt of the source velocity.
    start_ampl = (np.sqrt(start_vel) / (np.sqrt(np.sum(((nes_op.xs - ray[0]) ** 2))) ** (dims - 1)))
    # Transport-equation integral of v^2 * Laplacian(T) along the ray
    # (integrated in traveltime via the trapezoidal rule).
    amplitude = (start_ampl * np.exp((((- 1) / 2) * np.trapz(((ray_vels ** 2) * laplacians), ray_times))))
    return amplitude
def tile_images(img_nhwc):
    """Tile a batch of images of shape (N, h, w, c) into one big image.

    The grid has H = ceil(sqrt(N)) rows and W = ceil(N / H) columns; unused
    grid cells are filled with zero (black) images. Returns an array of
    shape (H*h, W*w, c).
    """
    imgs = np.asarray(img_nhwc)
    n_imgs, height, width, channels = imgs.shape
    grid_rows = int(np.ceil(np.sqrt(n_imgs)))
    grid_cols = int(np.ceil(float(n_imgs) / grid_rows))
    # Pad the batch with zero images so it fills the grid exactly.
    padding = [imgs[0] * 0 for _ in range(n_imgs, grid_rows * grid_cols)]
    imgs = np.array(list(imgs) + padding)
    grid = imgs.reshape(grid_rows, grid_cols, height, width, channels)
    # Bring row-pixel and column-pixel axes next to their grid axes.
    grid = grid.transpose(0, 2, 1, 3, 4)
    return grid.reshape(grid_rows * height, grid_cols * width, channels)
class Loader(yaml.Loader, metaclass=LoaderMeta):
    """YAML loader supporting an include constructor for nested files."""

    def __init__(self, stream):
        try:
            # Resolve included paths relative to the including file.
            self._root = os.path.split(stream.name)[0]
        except AttributeError:
            # Stream has no filename (e.g. an in-memory buffer): use cwd.
            self._root = os.path.curdir
        super().__init__(stream)

    def construct_include(self, node):
        """Load and return the contents of the file referenced by `node`.

        .yaml/.yml files are parsed recursively with this same Loader;
        any other extension is returned as raw text.
        """
        filename = os.path.abspath(os.path.join(self._root, self.construct_scalar(node)))
        extension = os.path.splitext(filename)[1].lstrip('.')
        with open(filename, 'r') as f:
            if (extension in ('yaml', 'yml')):
                return yaml.load(f, Loader)
            else:
                return ''.join(f.readlines())
def prepare_data_seq(task, batch_size=100):
    """Load the KVR graph-dialogue splits and build batched datasets.

    Reads the hard-coded train/dev/test files, builds the vocabulary from
    the training pairs, converts text to index sequences, and batches each
    split. Returns (train, dev, test, [], lang, max_resp_len, n_train,
    n_dev, n_test).

    NOTE(review): `task` is unused here and the KVR paths are hard-coded.
    """
    file_train = 'data/KVR/train_graph.txt'
    file_dev = 'data/KVR/dev_graph.txt'
    file_test = 'data/KVR/test_graph.txt'
    (pair_train, train_max_len) = read_langs(file_train, max_line=None)
    (pair_dev, dev_max_len) = read_langs(file_dev, max_line=None)
    (pair_test, test_max_len) = read_langs(file_test, max_line=None)
    # +1 leaves room for an end-of-sequence position.
    max_resp_len = (max(train_max_len, dev_max_len, test_max_len) + 1)
    # Vocabulary is built from the training pairs only.
    lang = build_lang(pair_train, True)
    train_seq = text_to_sequence(pair_train, lang)
    dev_seq = text_to_sequence(pair_dev, lang)
    test_seq = text_to_sequence(pair_test, lang)
    train = get_seq(train_seq, batch_size, drop_remainder=False)
    dev = get_seq(dev_seq, batch_size, drop_remainder=False)
    test = get_seq(test_seq, batch_size, drop_remainder=False)
    # Pull one batch to force dataset construction; contents are unused.
    (context_arr, response, sketch_response, conv_arr, ptr_index, selector_index, kb_arr, context_arr_plain, response_plain, kb_arr_plain, context_arr_lengths, response_lengths, conv_arr_lengths, kb_arr_lengths, ent_index, ent_index_lengths, ent_idx_cal, ent_idx_nav, ent_idx_wet, ent_idx_cal_lengths, ent_idx_nav_lengths, ent_idx_wet_lengths, ID, deps, deps_type, cell_masks, adj, head_pointer) = next(iter(train))
    print(('Read %s sentence pairs train' % len(pair_train)))
    print(('Read %s sentence pairs dev' % len(pair_dev)))
    print(('Read %s sentence pairs test' % len(pair_test)))
    print(('Vocab_size: %s ' % lang.n_words))
    print(('Max. length of system response: %s ' % max_resp_len))
    print('USE_CUDA={}'.format(USE_CUDA))
    return (train, dev, test, [], lang, max_resp_len, len(pair_train), len(pair_dev), len(pair_test))
class IInt8MinMaxCalibrator(CalibratorBase, trt.IInt8MinMaxCalibrator):
    """INT8 min-max calibrator combining the project's CalibratorBase with
    TensorRT's IInt8MinMaxCalibrator."""

    def __init__(self, *args, **kwargs):
        # Initialize both bases explicitly: the project base takes the
        # arguments, the TensorRT base takes none.
        CalibratorBase.__init__(self, *args, **kwargs)
        trt.IInt8MinMaxCalibrator.__init__(self)
class PerformanceTable():
    """Collects per-(batch_size, beam_size) measurements and renders them
    as a markdown table of averages, percentiles and optional speedups.
    """

    def __init__(self, percentiles, unit, reverse_percentiles=False):
        self.percentiles = percentiles
        # data[(batch_size, beam_size)][math] = (avg, [percentile stats])
        self.data = collections.defaultdict(dict)
        self.unit = unit
        # When True, percentiles are mirrored (p -> 100 - p), e.g. for
        # throughput-like metrics where the tail is on the other side.
        self.reverse_percentiles = reverse_percentiles

    def add(self, key, value):
        """Record stats for `key` from a {math_mode: measurements} dict."""
        (math, value) = next(iter(value.items()))
        value = np.array(value)
        if self.reverse_percentiles:
            percentiles = [(100 - p) for p in self.percentiles]
        else:
            percentiles = self.percentiles
        stats = []
        for p in percentiles:
            val = np.percentile(value, p)
            # unit_convert is not defined in this class -- presumably
            # provided by a subclass or assigned externally; verify.
            stats.append((val * self.unit_convert[self.unit]))
        avg = (value.mean() * self.unit_convert[self.unit])
        self.data[key].update({math: (avg, stats)})

    def write(self, title, math, relative=None, reverse_speedup=False):
        """Emit the markdown table for `math`, optionally interleaving
        speedup columns computed against another recorded math mode."""
        writer = MarkdownTableWriter()
        writer.table_name = f'{title} - {math.upper()}'
        main_header = ['**Batch Size**', '**Beam Size**']
        data_header = [f'**Avg ({self.unit})**']
        data_header += [f'**{p}% ({self.unit})**' for p in self.percentiles]
        if relative:
            # One speedup column after each data column.
            speedup_header = (['**Speedup**'] * len(data_header))
            data_header = interleave(data_header, speedup_header)
        writer.headers = (main_header + data_header)
        writer.value_matrix = []
        for (k, v) in self.data.items():
            (batch_size, beam_size) = k
            (avg, res_percentiles) = v[math]
            main = [batch_size, beam_size]
            data = [avg, *res_percentiles]
            if relative:
                rel = self.data[k][relative]
                (rel_avg, rel_res_percentiles) = rel
                rel = [rel_avg, *rel_res_percentiles]
                speedup = [(d / r) for (r, d) in zip(rel, data)]
                if reverse_speedup:
                    # Invert when smaller-is-better (e.g. latency).
                    speedup = [(1 / s) for s in speedup]
                data = interleave(data, speedup)
            writer.value_matrix.append((main + data))
        writer.write_table()
def classifier_layers(x, input_shape, trainable=False):
    """Final ResNet stage for the classifier head: one conv block plus two
    identity blocks (all TimeDistributed), ending in a 7x7 average pool.

    The conv block uses stride (2, 2) on the TensorFlow backend and
    (1, 1) on Theano.
    """
    if (K.backend() == 'tensorflow'):
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', input_shape=input_shape, strides=(2, 2), trainable=trainable)
    elif (K.backend() == 'theano'):
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', input_shape=input_shape, strides=(1, 1), trainable=trainable)
    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b', trainable=trainable)
    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c', trainable=trainable)
    x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x)
    return x
def rf_importance(models):
    """Return the mean of `feature_importances_` across fitted models."""
    per_model = np.array([estimator.feature_importances_ for estimator in models])
    return np.mean(per_model, axis=0)
class Iter_LR_Scheduler(object):
    """Iteration-based LR scheduler with warmup and cos/poly/step decay.

    Call the instance as `scheduler(optimizer, iteration)` each step; it
    writes the new learning rate (clipped at `min_lr`) into
    `optimizer.param_groups[0]['lr']`.
    """

    def __init__(self, args, max_iteration, iters_per_epoch):
        self.mode = args.mode
        print('Using {} LR Scheduler!'.format(self.mode))
        self.lr = args.base_lr
        self.lr_step = args.lr_step
        self.iters_per_epoch = iters_per_epoch
        self.max_iteration = max_iteration
        self.epoch = (- 1)
        self.warmup_iters = args.warmup_iters
        self.min_lr = (args.min_lr if (args.min_lr is not None) else 0)
        self.warmup_start_lr = args.warmup_start_lr
        # Geometric warmup factor; guard against ZeroDivisionError when
        # warmup is disabled (warmup_iters == 0) -- the original crashed.
        if self.warmup_iters > 0:
            self.warmup_factor = ((self.lr / args.warmup_start_lr) ** (1.0 / args.warmup_iters))
        else:
            self.warmup_factor = 1.0

    def __call__(self, optimizer, iteration):
        if ((self.warmup_iters > 0) and (iteration < self.warmup_iters)):
            # Exponential ramp from warmup_start_lr up to the base lr.
            lr = (self.warmup_start_lr * (self.warmup_factor ** iteration))
        elif (self.mode == 'cos'):
            lr = ((0.5 * self.lr) * (1 + math.cos((((1.0 * iteration) / self.max_iteration) * math.pi))))
        elif (self.mode == 'poly'):
            lr = (self.lr * pow((1 - ((iteration - self.warmup_iters) / (self.max_iteration - self.warmup_iters))), 0.9))
        elif (self.mode == 'step'):
            print('Warning! Now the step decline lr exists some issue')
            if (not self.lr_step):
                raise NotImplementedError
            epoch = (iteration // self.iters_per_epoch)
            lr = (self.lr * (0.1 ** (epoch // self.lr_step)))
        else:
            # Was `raise NotImplemented`, which raises a TypeError because
            # NotImplemented is not an exception type.
            raise NotImplementedError('Unknown LR scheduler mode: {}'.format(self.mode))
        if (iteration == self.warmup_iters):
            print('==> warmup done, start to implement poly lr strategy')
        # Log once at the start of every new epoch.
        if ((not (iteration % self.iters_per_epoch)) and ((iteration // self.iters_per_epoch) > self.epoch)):
            epoch = (iteration // self.iters_per_epoch)
            print(('\n=>Epoches %i, learning rate = %.4f' % (epoch, lr)))
            self.epoch = epoch
        optimizer.param_groups[0]['lr'] = max(lr, self.min_lr)

    def get_lr(self, optimizer):
        """Return the learning rate currently stored on the optimizer."""
        return optimizer.param_groups[0]['lr']
def hash_loop(data):
    """Fingerprint a loop record by hashing its parameters and commands.

    Returns (hash_str, uid), where hash_str is
    '<sha256 of stacked params>_<sha256 of stacked cmds>' and uid is the
    record's 'tmp_uid' field.
    """
    params = np.vstack(data['param'])
    cmds = np.hstack(data['cmd'])
    param_digest = sha256(np.ascontiguousarray(params).flatten()).hexdigest()
    cmd_digest = sha256(np.ascontiguousarray(cmds).flatten()).hexdigest()
    return ('{}_{}'.format(param_digest, cmd_digest), data['tmp_uid'])
def test_missing_contrib_extra(caplog):
    """pyhf.contrib.utils should log clear install instructions when the
    'contrib' extra's `requests` dependency is missing, rather than crash
    at import time."""
    with mock.patch.dict(sys.modules):
        # Simulate `requests` being uninstalled (None halts the import).
        sys.modules['requests'] = None
        # Reload if already imported so the guarded import re-runs.
        if ('pyhf.contrib.utils' in sys.modules):
            reload(sys.modules['pyhf.contrib.utils'])
        else:
            import_module('pyhf.contrib.utils')
        with caplog.at_level(logging.ERROR):
            for line in ['import of requests halted; None in sys.modules', 'Installation of the contrib extra is required to use pyhf.contrib.utils.download', "Please install with: python -m pip install 'pyhf[contrib]'"]:
                assert (line in caplog.text)
            caplog.clear()
def unwrap_node(node):
    """Follow ResultRefNode wrappers down to the underlying expression."""
    unwrapped = node
    while isinstance(unwrapped, UtilNodes.ResultRefNode):
        unwrapped = unwrapped.expression
    return unwrapped
class DistillKL(nn.Module):
    """KL-divergence loss for knowledge distillation.

    Softens student logits `y_s` and teacher logits `y_t` with temperature
    `T`, then returns KL(teacher || student) scaled by T**2 (to keep
    gradient magnitudes comparable across temperatures) and averaged over
    the batch.
    """

    def __init__(self, T):
        super(DistillKL, self).__init__()
        self.T = T

    def forward(self, y_s, y_t):
        p_s = F.log_softmax(y_s / self.T, dim=1)
        p_t = F.softmax(y_t / self.T, dim=1)
        # reduction='sum' replaces the deprecated size_average=False.
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2) / y_s.shape[0]
        return loss
class RecoveryLikelihood(tf.keras.Model):
    """Energy-based recovery-likelihood model over a diffusion schedule.

    Holds the per-timestep noise schedule (sigmas / a_s and their
    cumulative products) plus a shared energy network, and provides
    forward diffusion (q_sample*), Langevin-based reverse sampling
    (p_sample*) and the contrastive training loss.
    """

    def __init__(self, hps):
        super(RecoveryLikelihood, self).__init__()
        self.hps = hps
        self.num_timesteps = FLAGS.num_diffusion_timesteps
        (self.sigmas, self.a_s) = get_sigma_schedule(beta_start=0.0001, beta_end=0.02, num_diffusion_timesteps=self.num_timesteps)
        # Cumulative schedule for jumping directly from x_0 to x_t.
        self.a_s_cum = np.cumprod(self.a_s)
        self.sigmas_cum = np.sqrt((1 - (self.a_s_cum ** 2)))
        self.a_s_prev = self.a_s.copy()
        self.a_s_prev[(- 1)] = 1
        # is_recovery zeroes the conditioning term at the final step.
        self.is_recovery = np.ones((self.num_timesteps + 1), dtype=np.float32)
        self.is_recovery[(- 1)] = 0
        # Channel multipliers per resolution, chosen by image size.
        if (self.hps.img_sz == 32):
            ch_mult = (1, 2, 2, 2)
        elif (self.hps.img_sz == 128):
            ch_mult = (1, 2, 2, 2, 4, 4)
        elif (self.hps.img_sz == 64):
            ch_mult = (1, 2, 2, 2, 4)
        elif (self.hps.img_sz == 256):
            ch_mult = (1, 1, 2, 2, 2, 4, 4)
        else:
            raise NotImplementedError
        self.net = net_res_temb2(name='net', ch=128, ch_mult=ch_mult, num_res_blocks=FLAGS.num_res_blocks, attn_resolutions=(16,))

    def init(self, x_shape):
        """Build the network's variables with one dummy forward pass."""
        x = tf.random.uniform(x_shape, minval=(- 0.5), maxval=0.5)
        self.net(x, 0, dropout=0.0)

    @staticmethod
    def _extract(a, t, x_shape):
        """Gather schedule values a[t], reshaped to broadcast over x.

        NOTE(review): the original definition had neither `self` nor a
        decorator, so every `self._extract(...)` call would have raised a
        TypeError; the (presumably stripped) @staticmethod is restored.
        """
        if (isinstance(t, int) or (len(t.shape) == 0)):
            t = (tf.ones(x_shape[0], dtype=tf.int32) * t)
        (bs,) = t.shape
        assert (x_shape[0] == bs)
        out = tf.gather(tf.convert_to_tensor(a, dtype=tf.float32), t)
        assert (out.shape == [bs])
        return tf.reshape(out, ([bs] + ((len(x_shape) - 1) * [1])))

    def q_sample(self, x_start, t, *, noise=None):
        """Diffuse x_start directly to timestep t (forward process)."""
        if (noise is None):
            noise = tf.random.normal(shape=x_start.shape)
        assert (noise.shape == x_start.shape)
        x_t = ((self._extract(self.a_s_cum, t, x_start.shape) * x_start) + (self._extract(self.sigmas_cum, t, x_start.shape) * noise))
        return x_t

    def q_sample_pairs(self, x_start, t):
        """Return (x_t, x_{t+1}): a sample at t and its one-step diffusion."""
        noise = tf.random.normal(shape=x_start.shape)
        x_t = self.q_sample(x_start, t)
        x_t_plus_one = ((self._extract(self.a_s, (t + 1), x_start.shape) * x_t) + (self._extract(self.sigmas, (t + 1), x_start.shape) * noise))
        return (x_t, x_t_plus_one)

    def q_sample_progressive(self, x_0):
        """Diffuse x_0 through every timestep; returns stacked samples."""
        x_preds = []
        for t in range((self.num_timesteps + 1)):
            t_now = (tf.ones([x_0.shape[0]], dtype=tf.int32) * t)
            x = self.q_sample(x_0, t_now)
            x_preds.append(x)
        x_preds = tf.stack(x_preds, axis=0)
        return x_preds

    def training_losses(self, x_pos, x_neg, t, *, dropout=0.0):
        """Contrastive loss: energy gap between data and negative samples.

        Returns (scaled average loss, per-timestep |loss|, per-timestep
        |positive energy|).
        """
        a_s = self._extract(self.a_s_prev, (t + 1), x_pos.shape)
        y_pos = (a_s * x_pos)
        y_neg = (a_s * x_neg)
        pos_f = self.net(y_pos, t, dropout=dropout)
        neg_f = self.net(y_neg, t, dropout=dropout)
        loss = (- (pos_f - neg_f))
        # Rescale by the noise level so all timesteps contribute comparably.
        loss_scale = (1.0 / (tf.cast(tf.gather(self.sigmas, (t + 1)), tf.float32) / self.sigmas[1]))
        loss = (loss_scale * loss)
        loss_ts = tf.math.unsorted_segment_mean(tf.abs(loss), t, self.num_timesteps)
        f_ts = tf.math.unsorted_segment_mean(tf.abs(pos_f), t, self.num_timesteps)
        return (tf.nn.compute_average_loss(loss, global_batch_size=self.hps.n_batch_train), loss_ts, f_ts)

    def log_prob(self, y, t, tilde_x, b0, sigma, is_recovery, *, dropout):
        """Unnormalized conditional log-density: energy minus the Gaussian
        reconstruction term (masked out at the final step)."""
        return ((self.net(y, t, dropout=dropout) / tf.reshape(b0, [(- 1)])) - tf.reduce_sum((((((y - tilde_x) ** 2) / 2) / (sigma ** 2)) * is_recovery), axis=[1, 2, 3]))

    def grad_f(self, y, t, tilde_x, b0, sigma, is_recovery, *, dropout):
        """Return (d log_prob / dy, log_prob) via autodiff."""
        with tf.GradientTape() as tape:
            tape.watch(y)
            log_p_y = self.log_prob(y, t, tilde_x, b0, sigma, is_recovery, dropout=dropout)
        grad_y = tape.gradient(log_p_y, y)
        return (grad_y, log_p_y)

    def p_sample_langevin(self, tilde_x, t, *, dropout):
        """Sample x_t given tilde_x = x_{t+1} by Langevin dynamics.

        Returns (sample, per-timestep displacement norm, acceptance stat).
        """
        sigma = self._extract(self.sigmas, (t + 1), tilde_x.shape)
        sigma_cum = self._extract(self.sigmas_cum, t, tilde_x.shape)
        is_recovery = self._extract(self.is_recovery, (t + 1), tilde_x.shape)
        a_s = self._extract(self.a_s_prev, (t + 1), tilde_x.shape)
        # Step size shrinks with the remaining noise level.
        c_t_square = (sigma_cum / self.sigmas_cum[0])
        step_size_square = ((c_t_square * self.hps.mcmc_step_size_b_square) * (sigma ** 2))
        y = tf.identity(tilde_x)
        is_accepted_summary = tf.zeros(y.shape[0], dtype=tf.float32)
        (grad_y, log_p_y) = self.grad_f(y, t, tilde_x, step_size_square, sigma, is_recovery, dropout=dropout)
        for _ in tf.range(tf.convert_to_tensor(self.hps.mcmc_num_steps)):
            noise = tf.random.normal(y.shape)
            y_new = ((y + ((0.5 * step_size_square) * grad_y)) + ((tf.sqrt(step_size_square) * noise) * FLAGS.noise_scale))
            (grad_y_new, log_p_y_new) = self.grad_f(y_new, t, tilde_x, step_size_square, sigma, is_recovery, dropout=dropout)
            (y, grad_y, log_p_y) = (y_new, grad_y_new, log_p_y_new)
        is_accepted_summary = (is_accepted_summary / tf.convert_to_tensor(self.hps.mcmc_num_steps, dtype=tf.float32))
        is_accepted_summary = tf.reduce_mean(is_accepted_summary)
        # Undo the a_s scaling applied to the conditioning variable.
        x = (y / a_s)
        disp = tf.math.unsorted_segment_mean(tf.norm((tf.reshape(x, [x.shape[0], (- 1)]) - tf.reshape(tilde_x, [tilde_x.shape[0], (- 1)])), axis=1), t, self.num_timesteps)
        return (x, disp, is_accepted_summary)

    def p_sample_progressive(self, noise):
        """Run the full reverse chain from pure noise, keeping every
        intermediate sample. Returns (stacked samples, acceptance stat)."""
        num = noise.shape[0]
        x_neg_t = noise
        x_neg = tf.zeros([self.hps.num_diffusion_timesteps, num, self.hps.img_sz, self.hps.img_sz, 3], dtype=tf.float32)
        x_neg = tf.concat([x_neg, tf.expand_dims(noise, axis=0)], axis=0)
        is_accepted_summary = tf.constant(0.0)
        for t in tf.range((self.hps.num_diffusion_timesteps - 1), (- 1), (- 1)):
            (x_neg_t, _, is_accepted) = self.p_sample_langevin(x_neg_t, t, dropout=0.0)
            is_accepted_summary = (is_accepted_summary + is_accepted)
            x_neg_t = tf.reshape(x_neg_t, [num, self.hps.img_sz, self.hps.img_sz, 3])
            # Scatter the sample into slot t without tensor item assignment.
            insert_mask = tf.equal(t, tf.range((self.hps.num_diffusion_timesteps + 1), dtype=tf.int32))
            insert_mask = tf.reshape(tf.cast(insert_mask, dtype=tf.float32), [(- 1), *([1] * len(noise.shape))])
            x_neg = ((insert_mask * tf.expand_dims(x_neg_t, axis=0)) + ((1.0 - insert_mask) * x_neg))
        is_accepted_summary = (is_accepted_summary / tf.convert_to_tensor(self.hps.num_diffusion_timesteps, dtype=tf.float32))
        return (x_neg, is_accepted_summary)

    def p_sample_progressive_inner(self, noise):
        """Per-replica body of distribute_p_sample_progressive.

        NOTE(review): duplicates p_sample_progressive verbatim; kept for
        interface compatibility with strategy.run.
        """
        num = noise.shape[0]
        x_neg_t = noise
        x_neg = tf.zeros([self.hps.num_diffusion_timesteps, num, self.hps.img_sz, self.hps.img_sz, 3], dtype=tf.float32)
        x_neg = tf.concat([x_neg, tf.expand_dims(noise, axis=0)], axis=0)
        is_accepted_summary = tf.constant(0.0)
        for t in tf.range((self.hps.num_diffusion_timesteps - 1), (- 1), (- 1)):
            (x_neg_t, _, is_accepted) = self.p_sample_langevin(x_neg_t, t, dropout=0.0)
            is_accepted_summary = (is_accepted_summary + is_accepted)
            x_neg_t = tf.reshape(x_neg_t, [num, self.hps.img_sz, self.hps.img_sz, 3])
            insert_mask = tf.equal(t, tf.range((self.hps.num_diffusion_timesteps + 1), dtype=tf.int32))
            insert_mask = tf.reshape(tf.cast(insert_mask, dtype=tf.float32), [(- 1), *([1] * len(noise.shape))])
            x_neg = ((insert_mask * tf.expand_dims(x_neg_t, axis=0)) + ((1.0 - insert_mask) * x_neg))
        is_accepted_summary = (is_accepted_summary / tf.convert_to_tensor(self.hps.num_diffusion_timesteps, dtype=tf.float32))
        return (x_neg, is_accepted_summary)

    def distribute_p_sample_progressive(self, noise, strategy):
        """Run progressive sampling across a tf.distribute strategy and
        merge the per-replica results."""
        (samples, is_accepted) = strategy.run(self.p_sample_progressive_inner, args=(noise,))
        samples = tf.concat(samples.values, axis=1)
        is_accepted = strategy.reduce(tf.distribute.ReduceOp.MEAN, is_accepted, axis=None)
        return (samples, is_accepted)
class DoubleType(FloatType):
    """IEEE-754 double precision: 11 exponent bits, 53-bit significand."""
    __slots__ = ()
    exp = 11
    frac = 53

    def __str__(self):
        return "double"
class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
    """Tests for diverse-siblings beam search (sibling-rank penalty)."""

    def assertHypoScore(self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0):
        """Check the hypothesis' positional scores with the diversity
        penalty (sibling_rank * diversity_rate) subtracted, and the
        aggregated, optionally length-normalized, total score."""
        pos_scores = torch.FloatTensor(pos_probs).log()
        pos_scores.sub_((torch.Tensor(sibling_rank) * diversity_rate))
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            score /= (pos_scores.numel() ** lenpen)
        self.assertLess(abs((score - hypo['score'])), 1e-06)

    def test_diverse_beam_search(self):
        """Generate with diversity_rate=0.5, beam 2, and verify the top two
        hypotheses for both source sentences (tokens and scores)."""
        search_strategy = search.DiverseSiblingsSearch(self.tgt_dict, diversity_rate=0.5)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1, w2) = (self.eos, self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
        self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)
@pytest.mark.skipif((not _ti_core.GGUI_AVAILABLE), reason='GGUI Not Available')
@test_utils.test(arch=supported_archs)
def test_draw_part_of_particles_per_vertex_rad_and_col_old():
    """Render a sub-range of particles with per-vertex radius and color
    and compare against the stored reference image.

    NOTE(review): the decorator lines were garbled in the original
    ('.skipif(...)' / '_utils.test(...)'), and the typed init functions
    (ti.template() parameters) lacked their @ti.kernel decorators; all
    restored -- confirm exact names against the upstream test file.
    """
    N = 10
    particles_pos = ti.Vector.field(3, dtype=ti.f32, shape=N)
    particles_col = ti.Vector.field(3, dtype=ti.f32, shape=N)
    particles_radii = ti.field(dtype=ti.f32, shape=N)

    @ti.kernel
    def init_points_pos(points: ti.template()):
        for i in range(points.shape[0]):
            points[i] = [i for j in ti.static(range(3))]

    @ti.kernel
    def init_points_col(points: ti.template()):
        for i in range(points.shape[0]):
            points[i] = [((i + 1) / N), 0.5, ((i + 1) / N)]

    @ti.kernel
    def init_points_radii(radii: ti.template()):
        for i in range(radii.shape[0]):
            radii[i] = ((i + 1) * 0.05)

    init_points_pos(particles_pos)
    init_points_radii(particles_radii)
    init_points_col(particles_col)
    window = ti.ui.Window('Test', (768, 768), show_window=False)
    canvas = window.get_canvas()
    scene = ti.ui.Scene()
    camera = ti.ui.Camera()
    camera.position(0, 5, (- 10))
    camera.lookat(3, 3, 1)

    def render():
        scene.set_camera(camera)
        scene.ambient_light((0.8, 0.8, 0.8))
        scene.point_light(pos=(0.5, 1.5, 1.5), color=(1, 1, 1))
        # Draw only particles [2, 8): index_offset=2, index_count=6.
        scene.particles(particles_pos, color=(0.68, 0.26, 0.19), radius=0.5, per_vertex_color=particles_col, per_vertex_radius=particles_radii, index_offset=2, index_count=6)
        canvas.scene(scene)

    # Warm up the render pipeline before grabbing the verification frame.
    for _ in range(RENDER_REPEAT):
        render()
        window.get_image_buffer_as_numpy()
    render()
    verify_image(window.get_image_buffer_as_numpy(), 'test_draw_part_of_particles_per_vertex_rad_and_col')
    window.destroy()
class DomainRegistrarService(Service):
    """Service that installs a domain-registrar server on a node.

    Requires DomainNameService to already be configured on the target
    node; this is asserted during configuration.
    """

    def __init__(self):
        super().__init__()
        # Depends on the Base layer (neither reverse nor optional).
        self.addDependency('Base', False, False)

    def getName(self) -> str:
        return 'DomainRegistrarService'

    def _createServer(self) -> DomainRegistrarServer:
        return DomainRegistrarServer()

    def _doConfigure(self, node: Node, server: Server):
        # The registrar only works alongside a DNS service on the node.
        assert ('DomainNameService' in node.getAttribute('services')), 'DomainNameService required on node to use DomainRegistrarService.'

    def print(self, indent: int) -> str:
        indented = ' ' * indent
        return indented + 'DomainRegistrarService\n'
class ProteinOneHotAbstractModel(ProteinModel):
    """Abstract base for one-hot protein models.

    Provides the config class, an (empty) pretrained-archive map, the model
    prefix, and standard weight initialisation.
    """
    config_class = ProteinOneHotConfig
    pretrained_model_archive_map: typing.Dict[(str, str)] = {}
    base_model_prefix = 'onehot'

    def _init_weights(self, module):
        # Normal-init weights of Linear/Embedding layers; zero Linear biases.
        if not isinstance(module, (nn.Linear, nn.Embedding)):
            return
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
class RecordLookup(ContentLookup):
    """Lookup-table logic for a RecordArray layout.

    The positions table stores, starting at ``pos``: the array length (slot
    LENGTH) followed by one slot per record field (starting at CONTENTS),
    each holding the field content's own lookup position.
    """
    LENGTH = 0    # offset of the length slot, relative to pos
    CONTENTS = 1  # offset of the first per-field slot, relative to pos

    # NOTE(review): takes ``cls`` but carries no decorator here — probably a
    # stripped ``@classmethod``; confirm against the original file.
    def tolookup(cls, layout, positions):
        # Reserve a length slot plus one slot per field, then recursively
        # register each field's content and record where it landed.
        pos = len(positions)
        positions.append(len(layout))
        positions.extend(([None] * len(layout.contents)))
        for (i, content) in enumerate(layout.contents):
            positions[((pos + cls.CONTENTS) + i)] = tolookup(content, positions)
        return pos

    def tolayout(self, lookup, pos, fields):
        """Rebuild a layout from the lookup table.

        ``fields`` is a path of field names: a non-empty path descends into
        the first named field; an empty path rebuilds the whole record.
        """
        if (len(fields) > 0):
            index = self.fieldindex(fields[0])
            assert (index is not None)
            # descend into the chosen field via its recorded position
            return self.contenttypes[index].tolayout(lookup, lookup.positions[((pos + self.CONTENTS) + index)], fields[1:])
        else:
            # rebuild every field and reassemble the RecordArray
            contents = []
            for (i, contenttype) in enumerate(self.contenttypes):
                layout = contenttype.tolayout(lookup, lookup.positions[((pos + self.CONTENTS) + i)], fields)
                contents.append(layout)
            return ak.contents.RecordArray(contents, self.fields, lookup.positions[(pos + self.LENGTH)], parameters=self.parameters)
class _VocabParallelCrossEntropy(torch.autograd.Function):
    """Cross-entropy over logits whose vocabulary dimension is sharded across
    model-parallel ranks; partial per-rank results are combined with
    all-reduces over the model-parallel group.

    NOTE(review): ``forward``/``backward`` take ``ctx`` but carry no
    ``@staticmethod`` decorator here — likely lost in extraction; confirm.
    """

    def forward(ctx, vocab_parallel_logits, target):
        # Subtract the global max for numerical stability before exp().
        logits = vocab_parallel_logits.clone()
        logits_max = torch.max(logits, dim=(- 1))[0]
        torch.distributed.all_reduce(logits_max, op=torch.distributed.ReduceOp.MAX, group=get_model_parallel_group())
        logits.sub_(logits_max.unsqueeze(dim=(- 1)))
        # Global softmax denominator: exp-sum over the full (sharded) vocab.
        exp_logits = logits.exp()
        sum_exp_logits = exp_logits.sum(dim=(- 1))
        torch.distributed.all_reduce(sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=get_model_parallel_group())
        # This rank's [start, end) slice of the vocabulary.
        get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
        partition_vocab_size = vocab_parallel_logits.size()[(- 1)]
        rank = get_model_parallel_rank()
        world_size = get_model_parallel_world_size()
        (vocab_start_index, vocab_end_index) = get_vocab_range(partition_vocab_size, rank, world_size)
        # Targets outside this rank's slice are clamped to index 0; their
        # contribution is zeroed below and supplied by the owning rank via
        # the all-reduce.
        target_mask = ((target < vocab_start_index) | (target >= vocab_end_index))
        masked_target = (target.clone() - vocab_start_index)
        masked_target[target_mask] = 0
        # Gather the logit of the target token at every position.
        logits_2d = logits.view((- 1), partition_vocab_size)
        masked_target_1d = masked_target.view((- 1))
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
        predicted_logits_1d = logits_2d[(arange_1d, masked_target_1d)]
        predicted_logits = predicted_logits_1d.view_as(target)
        predicted_logits[target_mask] = 0.0
        torch.distributed.all_reduce(predicted_logits, op=torch.distributed.ReduceOp.SUM, group=get_model_parallel_group())
        # loss = log(sum_exp) - target_logit == -log(softmax[target])
        loss = (torch.log(sum_exp_logits) - predicted_logits)
        # Normalise exp_logits into the softmax and stash it for backward.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=(- 1)))
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
        return loss

    def backward(ctx, grad_output):
        # grad = (softmax - one_hot(target)) * grad_output, where the one-hot
        # subtraction applies only on the rank that owns the target index.
        (softmax, target_mask, masked_target_1d) = ctx.saved_tensors
        grad_input = softmax
        partition_vocab_size = softmax.size()[(- 1)]
        grad_2d = grad_input.view((- 1), partition_vocab_size)
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
        grad_2d[(arange_1d, masked_target_1d)] -= (1.0 - target_mask.view((- 1)).float())
        grad_input.mul_(grad_output.unsqueeze(dim=(- 1)))
        # no gradient w.r.t. target
        return (grad_input, None)
def configure_ws_ovpn_container():
    """Point the ws-ovpn container's OVPN server URL at the gateway head network.

    Rewrites line 24 of ovpn_env.sh using the GW_NETWORK_HEAD env variable.
    """
    env_file = 'ws-ovpn/' + 'ovpn_env.sh'
    head_net = str(os.getenv('GW_NETWORK_HEAD'))
    server_url_line = 'declare -x OVPN_SERVER_URL=udp://' + head_net + '.0.10'
    change_line(env_file, 24, server_url_line)
def gather(outputs, target_device, dim=0):
    """Gather a nested structure of per-device outputs onto ``target_device``.

    Recursively walks sequences/mappings, concatenating leaf tensors (via
    ``Gather.apply``) or numpy arrays along ``dim`` and boxing scalars into
    Variables.  The first element of ``outputs`` determines the structure.
    """
    import collections.abc  # local import: the abc submodule is needed below
    error_msg = 'outputs must contain tensors, numbers, dicts or lists; found {}'

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if (out is None):
            return None
        # BUGFIX: strings are Sequences, so they must be tested before the
        # generic Sequence branch (which previously shadowed this case and
        # would have tried to rebuild a str from mapped characters).
        if isinstance(out, string_classes):
            return outputs
        # BUGFIX: the ABC aliases in `collections` were removed in Python
        # 3.10 — use collections.abc explicitly.
        if isinstance(out, collections.abc.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.abc.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
            elem = out
            if (elem_type.__name__ == 'ndarray'):
                # refuse object/str/unicode dtypes; concatenate numeric arrays
                if (re.search('[SaUO]', elem.dtype.str) is not None):
                    raise TypeError(error_msg.format(elem.dtype))
                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if (elem.shape == ()):
                # numpy scalar: rebuild through the matching torch type
                py_type = (float if elem.dtype.name.startswith('float') else int)
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        raise TypeError(error_msg.format(elem_type))
    try:
        return gather_map(outputs)
    finally:
        # break the reference cycle created by the recursive closure
        gather_map = None
def test_parse_path():
    """parse_path splits local paths and cloud URLs into (provider, bucket, key)."""
    assert (parse_path('/') == ('local', '/', '/'))
    assert (parse_path('/tmp') == ('local', '/tmp', '/tmp'))
    assert (parse_path('does-not-exist-0000000/file') == ('local', 'does-not-exist-0000000/file', 'does-not-exist-0000000/file'))
    assert (parse_path('s3://bucket') == ('aws', 'bucket', ''))
    assert (parse_path('s3://bucket/') == ('aws', 'bucket', ''))
    assert (parse_path('s3://bucket/key') == ('aws', 'bucket', 'key'))
    assert (parse_path('gs://bucket') == ('gcp', 'bucket', ''))
    assert (parse_path('gs://bucket/') == ('gcp', 'bucket', ''))
    assert (parse_path('gs://bucket/key') == ('gcp', 'bucket', 'key'))
    # NOTE(review): the next three assertions were corrupted in extraction
    # (the URL string literals were lost).  Reconstructed here as the
    # https:// form of the Azure blob endpoint, mirroring the azure:// cases
    # below — confirm against the original file.
    assert (parse_path('https://bucket.blob.core.windows.net/container') == ('azure', 'bucket/container', ''))
    assert (parse_path('https://bucket.blob.core.windows.net/container/') == ('azure', 'bucket/container', ''))
    assert (parse_path('https://bucket.blob.core.windows.net/container/key') == ('azure', 'bucket/container', 'key'))
    assert (parse_path('azure://bucket/container') == ('azure', 'bucket/container', ''))
    assert (parse_path('azure://bucket/container/') == ('azure', 'bucket/container', ''))
    assert (parse_path('azure://bucket/container/key') == ('azure', 'bucket/container', 'key'))
    assert (parse_path('azure://bucket/container/key/path') == ('azure', 'bucket/container', 'key/path'))
def detections_to_tracks(detections):
    """Group detections by track id into frame-ordered tracks.

    Returns a list of tracks (lists of detections sorted by ``frame_id``),
    ordered by each track id's first appearance in ``detections``.
    """
    by_track = defaultdict(list)
    for detection in detections:
        by_track[detection.track_id].append(detection)
    return [sorted(dets, key=(lambda d: d.frame_id)) for dets in by_track.values()]
class MultiDecoder(object):
    """Chain of decoders for stacked content encodings.

    ``modes`` is a comma-separated list in the order the encodings were
    applied, so decompression walks the decoders in reverse order.
    """

    def __init__(self, modes):
        self._decoders = [_get_decoder(mode.strip()) for mode in modes.split(',')]

    def flush(self):
        # only the outermost (first-listed) decoder is flushed
        outermost = self._decoders[0]
        return outermost.flush()

    def decompress(self, data):
        # undo encodings innermost-first, i.e. iterate the list backwards
        for decoder in reversed(self._decoders):
            data = decoder.decompress(data)
        return data
def test_pbmc_cite(save_path):
    """Extract the 10x PBMC CITE-seq archive and smoke-test one training epoch."""
    extract_dir = os.path.join(save_path, '10X/pbmc_10k_protein_v3/')
    archive_path = os.path.join(save_path, '10X/pbmc_10k_protein_v3/filtered_feature_bc_matrix.tar.gz')
    # unpack the gzipped matrix bundle next to the archive
    with tarfile.open(archive_path, 'r:gz') as tar:
        tar.extractall(path=extract_dir)
    dataset = sc.read_10x_mtx(os.path.join(extract_dir, 'filtered_feature_bc_matrix'), gex_only=False)
    organize_cite_seq_10x(dataset)
    unsupervised_training_one_epoch(dataset)
def test_se_layer():
    """SELayer rejects invalid act_cfg values and preserves the input shape."""
    # both of these act_cfg forms must be rejected with an AssertionError
    with pytest.raises(AssertionError):
        SELayer(channels=32, act_cfg=(dict(type='ReLU'),))
    with pytest.raises(AssertionError):
        SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
    se = SELayer(channels=32)
    se.init_weights()
    se.train()
    inputs = torch.randn((1, 32, 10, 10))
    outputs = se(inputs)
    # the SE block rescales channels but keeps the tensor shape
    assert (outputs.shape == torch.Size((1, 32, 10, 10)))
class MockDDPWrapper(nn.Module):
    """Minimal stand-in for DistributedDataParallel.

    Exposes the wrapped model as ``.module`` and forwards calls straight
    through to it, with no gradient syncing or scattering.
    """

    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, x):
        # plain delegation to the wrapped module
        return self.module(x)
class C(FairseqDataclass):
    """Root config: data path, nested encoder/decoder configs, and learning rate.

    NOTE(review): a ``@dataclass`` decorator may have been stripped in
    extraction — these ``field(...)`` defaults only take effect under it.
    """
    data: str = field(default='test', metadata={'help': 'root level data input'})
    # BUGFIX: `field(default=D())` shares a single mutable instance across
    # every C; default_factory builds a fresh nested config per instance.
    encoder: D = field(default_factory=D)
    decoder: A = field(default_factory=A)
    lr: int = field(default=0, metadata={'help': 'learning rate'})
class TextDecoder():
    """Helper for decoding answer token sequences from image features.

    Keeps per-batch sequence state (``self.seqs``) plus lists of completed
    sequences and their scores.

    NOTE(review): ``self._decode_size`` is read in ``init_batch`` but never
    assigned in this class — presumably set by a subclass; confirm.
    """

    def __init__(self, vocab):
        self._vocab = vocab
        self._vocab_size = vocab.get_size()
        # populated as candidate sequences finish
        self._complete_seqs = []
        self._complete_seqs_scores = []

    def init_batch(self, sample_list):
        # Replicate each image feature `_decode_size` times so every decoding
        # candidate has its own copy, and start all sequences with SOS.
        img_size = sample_list.image_feature_0.size()
        (self._batch_size, feature_size_1, feature_size_2) = img_size
        t_batch_size = (self._batch_size * self._decode_size)
        self.seqs = sample_list.answers.new_full((t_batch_size, 1), self._vocab.SOS_INDEX, dtype=torch.long)
        sample_list.image_feature_0 = sample_list.image_feature_0.unsqueeze(1).expand((- 1), self._decode_size, (- 1), (- 1)).reshape(t_batch_size, feature_size_1, feature_size_2)
        self.sample_list = sample_list
        return sample_list

    def add_next_word(self, seqs, prev_word_inds, next_word_inds):
        # Append the chosen next words to the sequences they extend.
        return torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)

    def find_complete_inds(self, next_word_inds):
        # Split candidate indices into finished (emitted EOS) and unfinished.
        incomplete_inds = []
        for (ind, next_word) in enumerate(next_word_inds):
            if (next_word != self._vocab.EOS_INDEX):
                incomplete_inds.append(ind)
        complete_inds = list((set(range(len(next_word_inds))) - set(incomplete_inds)))
        return (complete_inds, incomplete_inds)

    def update_data(self, data, prev_word_inds, next_word_inds, incomplete_inds):
        # Keep only still-active candidates: next input tokens and both LSTM
        # hidden-state pairs, re-indexed through their source candidates.
        data['texts'] = next_word_inds[incomplete_inds].unsqueeze(1)
        h1 = data['state']['td_hidden'][0][prev_word_inds[incomplete_inds]]
        c1 = data['state']['td_hidden'][1][prev_word_inds[incomplete_inds]]
        h2 = data['state']['lm_hidden'][0][prev_word_inds[incomplete_inds]]
        c2 = data['state']['lm_hidden'][1][prev_word_inds[incomplete_inds]]
        data['state'] = {'td_hidden': (h1, c1), 'lm_hidden': (h2, c2)}
        return data
def search_limbs(data_source: str, mask: Optional[Union[(np.ndarray, tuple, list)]]=None, keypoints_factory: dict=KEYPOINTS_FACTORY) -> Tuple[(dict, dict)]:
    """Re-index human_data limb definitions into the convention ``data_source``.

    Args:
        data_source: key into ``keypoints_factory`` naming the target
            keypoint convention.
        mask: optional per-keypoint validity mask in target order; a limb is
            kept only if both of its endpoints are non-zero under the mask.
        keypoints_factory: mapping from convention name to keypoint-name list.

    Returns:
        (limbs_target, limbs_palette): limbs as index pairs in the target
        convention, and a colour palette per body part.

    NOTE(review): ``limbs_palette`` aliases the module-level
    ``HUMAN_DATA_PALETTE``, so the assignments below mutate that global;
    ``np.random.seed(0)`` also resets numpy's global RNG state.  Confirm
    both side effects are intended.
    """
    limbs_source = HUMAN_DATA_LIMBS_INDEX
    limbs_palette = HUMAN_DATA_PALETTE
    keypoints_source = keypoints_factory['human_data']
    keypoints_target = keypoints_factory[data_source]
    limbs_target = {}
    for (k, part_limbs) in limbs_source.items():
        limbs_target[k] = []
        for limb in part_limbs:
            flag = False
            # keep the limb only if both endpoints exist in the target set
            if ((keypoints_source[limb[0]] in keypoints_target) and (keypoints_source[limb[1]] in keypoints_target)):
                if (mask is not None):
                    # ... and both endpoints are valid under the mask
                    if ((mask[keypoints_target.index(keypoints_source[limb[0]])] != 0) and (mask[keypoints_target.index(keypoints_source[limb[1]])] != 0)):
                        flag = True
                else:
                    flag = True
            if flag:
                # setdefault is redundant here — limbs_target[k] was created
                # above, so this always appends to that list
                limbs_target.setdefault(k, []).append([keypoints_target.index(keypoints_source[limb[0]]), keypoints_target.index(keypoints_source[limb[1]])])
        if (k in limbs_target):  # always true: the key is created above
            if (k == 'body'):
                # fixed seed so the random body-limb colours are reproducible
                np.random.seed(0)
                limbs_palette[k] = np.random.randint(0, high=255, size=(len(limbs_target[k]), 3))
            else:
                limbs_palette[k] = np.array(limbs_palette[k])
    return (limbs_target, limbs_palette)
def test_WatchYourStep_save_load(tmpdir, barbell):
    """A WatchYourStep model on an AdjacencyPowerGenerator survives save/load."""
    gen = AdjacencyPowerGenerator(barbell, num_powers=5)
    model = WatchYourStep(gen)
    test_utils.model_save_load(tmpdir, model)
def annotations_to_instances(annos, image_size, sample_points=0):
    """Build Instances from point-based annotations.

    Requires every annotation to carry 'point_coords' and 'point_labels' and
    forbids mask ('segmentation') annotations.  When ``sample_points`` > 0,
    exactly that many points are sampled per instance (with replacement only
    if an instance has fewer points than requested).
    """
    target = base_annotations_to_instances(annos, image_size)
    assert ('point_coords' in annos[0])
    assert ('point_labels' in annos[0])
    assert ('segmentation' not in annos[0]), 'Please remove mask annotation'
    if len(annos) and ('point_labels' in annos[0]):
        coords_per_instance = []
        labels_per_instance = []
        for anno in annos:
            coords = np.array(anno['point_coords'])
            labels = np.array(anno['point_labels'])
            if sample_points > 0:
                # sample with replacement only when fewer points than requested
                chosen = np.random.choice(coords.shape[0], sample_points, replace=(coords.shape[0] < sample_points)).astype(int)
                coords = coords[chosen]
                labels = labels[chosen]
            assert (coords.shape[0] == labels.size)
            coords_per_instance.append(coords)
            labels_per_instance.append(labels)
        target.gt_point_coords = torch.stack([torch.from_numpy(c) for c in coords_per_instance])
        target.gt_point_labels = torch.stack([torch.from_numpy(lbl) for lbl in labels_per_instance])
    return target
def get_model(data_path='/tmp'):
    """Load the sensitive-topics classifier from the model zoo in eval mode."""
    model_file = modelzoo_path(data_path, 'zoo:sensitive_topics_classifier/model')
    # load and upgrade the stored options, then point them at this model
    opt = Opt.load(model_file + '.opt')
    TCA.upgrade_opt(opt)
    opt['model_file'] = model_file
    opt['dict_file'] = model_file + '.dict'
    classifier = TCA(opt)
    classifier.model.eval()
    return classifier
def convert_secs2time(epoch_time, return_str=False):
    """Split a duration in seconds into hours, minutes and seconds.

    Args:
        epoch_time: duration in seconds (floats are truncated toward zero).
        return_str: if True, return the string '[HH:MM:SS]' instead of a tuple.

    Returns:
        '[HH:MM:SS]' when return_str is True, else (hours, minutes, seconds).
    """
    need_hour = int(epoch_time / 3600)
    need_mins = int((epoch_time - (3600 * need_hour)) / 60)
    need_secs = int((epoch_time - (3600 * need_hour)) - (60 * need_mins))
    if return_str:
        # BUGFIX: previously this local was named `str`, shadowing the builtin
        time_str = '[{:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
        return time_str
    else:
        return (need_hour, need_mins, need_secs)
def randmat(shape, name, mu=0.0, type_init='he2', type_dist='normal', trainable=True, extra_scale=1.0):
    """Create a tf.Variable initialised from a random distribution.

    Args:
        shape: variable shape; fan-in/fan-out are derived from it.
        name: variable name.
        mu: distribution mean (normal) or centre (uniform).
        type_init: scale rule — 'xavier', 'xavier2', 'he', 'he2' or 'regular'.
        type_dist: 'normal' for Gaussian, anything else for uniform.
        trainable: passed through to tf.Variable.
        extra_scale: extra multiplier on the initialisation scale.

    Raises:
        ValueError: for an unrecognised ``type_init``.
    """
    # Derive fan-in/fan-out from the requested shape.
    if (len(shape) == 1):
        (dim_in, dim_out) = (shape[0], 0)
    elif (len(shape) == 2):
        (dim_in, dim_out) = shape
    else:
        # conv-style shape: collapse all but the leading dim into fan-in
        (dim_in, dim_out) = (np.prod(shape[1:]), shape[0])
    if (type_init == 'xavier'):
        bound = np.sqrt((1.0 / dim_in))
    elif (type_init == 'xavier2'):
        bound = np.sqrt((2.0 / (dim_in + dim_out)))
    elif (type_init == 'he'):
        bound = np.sqrt((2.0 / dim_in))
    elif (type_init == 'he2'):
        bound = np.sqrt((4.0 / (dim_in + dim_out)))
    elif (type_init == 'regular'):
        # NOTE(review): `sigma_init` (and `dtype` below) are not defined in
        # this function — they must be module-level globals; confirm.
        bound = sigma_init
    else:
        # BUGFIX: previously `raise Exception()` with no message
        raise ValueError('unknown type_init: {!r}'.format(type_init))
    if (type_dist == 'normal'):
        val = tf.random_normal(shape, mean=mu, stddev=(extra_scale * bound), dtype=dtype)
    else:
        val = tf.random_uniform(shape, minval=(mu - (extra_scale * bound)), maxval=(mu + (extra_scale * bound)), dtype=dtype)
    return tf.Variable(initial_value=val, name=name, trainable=trainable)
class ExplicitEnum(Enum):
    """Enum whose failed value lookups raise a ValueError that lists the
    valid choices instead of the terse default message."""

    def _missing_(cls, value):
        valid_values = str(list(cls._value2member_map_.keys()))
        message = ('%r is not a valid %s, please select one of %s' % (value, cls.__name__, valid_values))
        raise ValueError(message)
def main():
    """CLI entry point: parse args, build the selected EIT model, load the
    train/validation datasets, and run the training loop.

    NOTE(review): ``h``, ``device`` and the training helpers (``run_train``,
    ``train_batch_eit``, ``validate_epoch_eit``) are module-level names not
    defined here; ``config`` is used both as a mapping (``config[a]``) and by
    attribute (``config.weights_filename``), so ``load_yaml`` must return an
    attribute-style dict — confirm.
    """
    # --- command-line arguments -------------------------------------------
    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--model', type=str, default='unets', metavar='model', help='training model name, uit (integral transformer), ut (with traditional softmax normalization), hut (hybrid ut with linear attention), xut (cross-attention with hadamard product interaction), fno2d (Fourier neural operator 2d), unet (traditional UNet with CNN, big baseline, 33m params), unets (UNet with the same number of layers with U-integral transformer). default: unets)')
    parser.add_argument('--parts', nargs='+', default=[p for p in range(4, 7)], help='parts of data used in training/evaluation. default: [4, 5, 6]')
    parser.add_argument('--plot-index', type=int, default=6, metavar='idx_draw', help='the index of the inclusion to plot (default: 6)')
    parser.add_argument('--channels', type=int, default=1, metavar='num_chan', help='the number of channels of feature maps (default: 1)')
    parser.add_argument('--subsample', type=int, default=1, metavar='sample_scaling', help='subsample scale, subsample=2 means (101,101) input (default: 1)')
    parser.add_argument('--batch-size', type=int, default=10, metavar='batch_size', help='batch size for testing set (default: 10)')
    parser.add_argument('--epochs', type=int, default=50, metavar='epochs', help='number of epochs (default: 50)')
    parser.add_argument('--patience', type=int, default=15, metavar='patience', help='early stopping epochs (default: 15)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='learning_rate', help='maximum learning rate (default: 1e-3)')
    parser.add_argument('--no-grad-channel', action='store_true', default=False)
    args = parser.parse_args()
    # --- model configuration (per-model section of configs.yml) -----------
    config = load_yaml('./configs.yml', key=args.model)
    print(((('=' * 10) + 'Model setting:') + ('=' * 10)))
    for a in config.keys():
        if (not a.startswith('__')):
            print(f'{a}: {config[a]}')
    print(('=' * 33))
    # Import the model class lazily so only the chosen backend is loaded.
    if (args.model in ['uit', 'uit-c3', 'uit-c', 'ut', 'xut']):
        from libs.ut import UTransformer
        model = UTransformer(**config)
    elif (args.model in ['hut']):
        from libs.hut import HybridUT
        model = HybridUT(**config)
    elif (args.model in ['fno2d', 'fno2d-big']):
        from libs.fno import FourierNeuralOperator
        model = FourierNeuralOperator(**config)
    elif (args.model in ['unet', 'unet-small']):
        from libs.unet import UNet
        model = UNet(**config)
    else:
        raise NotImplementedError
    print(f'''
Training for {model.__class__.__name__} with {get_num_params(model)} params
''')
    model.to(device)
    # --- data: train with shuffling/drop_last, validate deterministically --
    train_dataset = EITDataset(part_idx=args.parts, file_type='h5', subsample=args.subsample, channel=args.channels, return_grad=(not args.no_grad_channel), online_grad=False, train_data=True)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, pin_memory=True)
    valid_dataset = EITDataset(part_idx=args.parts, file_type='h5', channel=args.channels, return_grad=(not args.no_grad_channel), online_grad=False, subsample=args.subsample, train_data=False)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, drop_last=False, pin_memory=True)
    # --- optimisation: Adam + one-cycle LR schedule ------------------------
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = OneCycleLR(optimizer, max_lr=args.lr, div_factor=1000.0, final_div_factor=10000.0, steps_per_epoch=len(train_loader), pct_start=0.2, epochs=args.epochs)
    loss_func = CrossEntropyLoss2d(regularizer=False, h=h, gamma=0.1)
    metric_func = L2Loss2d(regularizer=False)
    # run_train also writes the weight/result files named below;
    # `result` itself is not used further in this function.
    result = run_train(model, loss_func, metric_func, train_loader, valid_loader, optimizer, scheduler, train_batch=train_batch_eit, validate_epoch=validate_epoch_eit, epochs=args.epochs, patience=args.patience, model_name=(config.weights_filename + '.pt'), result_name=(config.weights_filename + '.pkl'), tqdm_mode='batch', mode='min', device=device)
    print('Training done.')
def test_recordarray_1():
    """jvp/vjp through a record-array field access give the expected values."""
    def pick_and_scale(arr):
        # scalar function of one record-array element: 2 * arr.y[2][0][1] + 10
        return ((2 * arr.y[2][0][1]) + 10)
    primal_jvp, tangent_out = jax.jvp(pick_and_scale, (test_recordarray,), (test_recordarray_tangent,))
    primal_vjp, pullback = jax.vjp(pick_and_scale, test_recordarray)
    # both transforms evaluate the same primal
    assert (ak.to_list(primal_jvp) == 14.0)
    assert (ak.to_list(primal_vjp) == 14.0)
    assert (ak.to_list(tangent_out) == 1.0)
    # the cotangent lands only on the accessed element, scaled by 2 * 14
    assert (ak.to_list(pullback(primal_vjp)[0]) == [[{'x': 0.0, 'y': [0.0]}, {'x': 0.0, 'y': [0.0, 0.0]}], [], [{'x': 0.0, 'y': [0.0, 28.0, 0.0]}]])
def get_ava_eval_data(scores, boxes, metadata, class_whitelist, verbose=False, video_idx_to_name=None):
    """Reformat per-box class scores into AVA-evaluation dictionaries.

    Each detection row is keyed by '<video_name>,<sec:04d>' (video index and
    second taken from ``metadata``); only classes in ``class_whitelist``
    (1-based) are kept.  Returns (out_boxes, out_labels, out_scores).
    """
    out_scores = defaultdict(list)
    out_labels = defaultdict(list)
    out_boxes = defaultdict(list)
    count = 0
    for row in range(scores.shape[0]):
        video_idx = int(np.round(metadata[row][0]))
        sec = int(np.round(metadata[row][1]))
        key = (video_idx_to_name[video_idx] + ',') + ('%04d' % sec)
        # permute box entries with the fixed index order [0, 2, 1, 4, 3];
        # only entries [1:] of the permuted box are emitted
        box = boxes[row].tolist()
        box = [box[j] for j in (0, 2, 1, 4, 3)]
        for (cls_idx, score) in enumerate(scores[row].tolist()):
            label = cls_idx + 1
            if label in class_whitelist:
                out_scores[key].append(score)
                out_labels[key].append(label)
                out_boxes[key].append(box[1:])
                count += 1
    return (out_boxes, out_labels, out_scores)
def _sympysage_polynomial(self):
    """Convert this sympy polynomial into an element of the matching Sage
    polynomial ring over the Sage version of its coefficient domain."""
    base = self.domain._sage_()
    gens_csv = ','.join(str(g) for g in self.gens)
    poly_ring = base[gens_csv]
    # rebuild term by term: coerce each coefficient into the base ring and
    # multiply by the corresponding monomial of the polynomial ring
    terms = ((base(coeff) * poly_ring.monomial(*exp)) for (exp, coeff) in self.rep.terms(order=None))
    return poly_ring.sum(terms)
def perspectivex_grid(output_size, ulim=(1, 8), vlim=((((- 0.99) * np.pi) / 2), ((0.99 * np.pi) / 2)), out=None, device=None):
    """Build an (nv, nu, 2) grid of (x, y) sample coordinates.

    The top half of the grid is the mirrored (negative-x, reversed-u) image
    of the bottom half; y is derived from x via tan(v).  For odd nv the last
    row of the left half is duplicated so the output stays nv rows tall.
    """
    (nv, nu) = output_size
    urange = torch.linspace(ulim[0], ulim[1], nu, device=device)
    vrange = torch.linspace(vlim[0], vlim[1], (nv // 2), device=device)
    (vs, us) = torch.meshgrid([vrange, urange])
    # left half uses reversed u so the two halves mirror each other
    xl = (- 1) / us.flip([1])
    xr = 1 / us
    yl = (- xl) * torch.tan(vs)
    yr = xr * torch.tan(vs)
    if (nv % 2) == 0:
        xs = torch.cat([xl, xr])
        ys = torch.cat([yl, yr])
    else:
        # duplicate the last left-half row for odd output heights
        last_row = xl.shape[0] - 1
        xs = torch.cat([xl, xl.narrow(0, last_row, 1), xr])
        ys = torch.cat([yl, yl.narrow(0, last_row, 1), yr])
    return torch.stack([xs, ys], 2, out=out)
def onnx_verify(onnx_model, inputs, ref_outputs):
    """Run ``onnx_model`` through the caffe2 ONNX backend and check that its
    outputs match ``ref_outputs`` to 3 decimal places.

    Entries of ``inputs`` may be bare arrays or (name, array) tuples; only
    the array part is fed to the backend.
    """
    prepared = caffe2.python.onnx.backend.prepare(onnx_model)
    feed = [item[1] if isinstance(item, tuple) else item for item in inputs]
    actual_outputs = prepared.run(inputs=feed)
    np.testing.assert_almost_equal(actual_outputs, ref_outputs, decimal=3)
def test_inner_dereference(testdir):
    # End-to-end schema test: generate a pytest module that POSTs to /users
    # with a body whose `id` resolves through `#/definitions/SimpleIntRef`,
    # run it, and expect exactly one hypothesis-generated passing case.
    # NOTE(review): the embedded source string looks garbled — the leading
    # '\(method="POST")' / '\(max_examples=1)' fragments appear to be
    # decorators whose names were lost in extraction (e.g.
    # `@schema.parametrize(...)` / `@settings(...)`); confirm against the
    # original file before relying on this test.
    testdir.make_test('\(method="POST")\(max_examples=1)\ndef test_(request, case):\n request.config.HYPOTHESIS_CASES += 1\n assert case.path == "/users"\n assert case.method == "POST"\n assert_int(case.body["id"])\n', paths={'/users': {'post': {'parameters': [{'schema': {'type': 'object', 'required': ['id'], 'properties': {'id': {'$ref': '#/definitions/SimpleIntRef'}}}, 'in': 'body', 'name': 'object', 'required': True}], 'responses': {'200': {'description': 'OK'}}}}}, definitions={'SimpleIntRef': {'type': 'integer'}})
    result = testdir.runpytest('-v', '-s')
    # one passing test, and the hypothesis case counter reported exactly 1
    result.assert_outcomes(passed=1)
    result.stdout.re_match_lines(['Hypothesis calls: 1$'])
def _save_eval_stats(opt, report):
    """Write the evaluation report (plus opt) as JSON to opt['report_filename'].

    No-op when no report filename is configured.
    """
    # NOTE(review): if `is_primary_worker` is a function, this condition is
    # missing call parens — `not <function>` is always False, so the early
    # return never fires and every worker writes the report.  Confirm whether
    # this should be `is_primary_worker()`.
    if (not is_primary_worker):
        return
    report_fname = opt['report_filename']
    if (report_fname == ''):
        return
    # NOTE(review): this aliases (does not copy) `report`, so the loop below
    # mutates the caller's dict, replacing Metric objects with plain values.
    json_serializable_report = report
    for (k, v) in report.items():
        if isinstance(v, Metric):
            v = v.value()
        json_serializable_report[k] = v
    # pretty-printed JSON with a trailing newline
    with PathManager.open(report_fname, 'w') as f:
        logging.info(f'Saving model report to {report_fname}')
        json.dump({'opt': opt, 'report': json_serializable_report}, f, indent=4)
        f.write('\n')
# NOTE(review): the line below looks like a decorator whose prefix was lost
# in extraction (probably `@pytest.mark.parametrize(...)`); as written it is
# not a valid statement — restore from the original file.
.parametrize('flatlist_as_rvec', [False, True])
def test_nested_NumpyArray(flatlist_as_rvec):
    # Generate C++ accessor code for a ListOffsetArray-of-NumpyArray layout,
    # compile it through ROOT's interpreter, and read values back out.
    # With offsets [0, 1, 5], entry 1 is the sublist [0.0, 1.1, 2.2, 3.3].
    v2a = ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 1, 5], dtype=np.int64)), ak.contents.numpyarray.NumpyArray(np.array([999.0, 0.0, 1.1, 2.2, 3.3]), parameters={'some': 'stuff', 'other': [1, 2, 'three']}))
    layout = v2a
    generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=flatlist_as_rvec)
    lookup = ak._lookup.Lookup(layout, generator)
    generator.generate(compiler)
    # The generated C++ reads entry 1: its size and two of its elements.
    ROOT.gInterpreter.Declare(f'''
void roottest_nested_NumpyArray_v2a_{flatlist_as_rvec}(double* out, ssize_t length, ssize_t* ptrs) {{
  auto obj = {generator.dataset()}[1];
  out[0] = obj.size();
  out[1] = obj[1];
  out[2] = obj[3];
}}
''')
    out = np.zeros(3, dtype=np.float64)
    getattr(ROOT, f'roottest_nested_NumpyArray_v2a_{flatlist_as_rvec}')(out, len(layout), lookup.arrayptrs)
    # size 4, element 1 == 1.1, element 3 == 3.3
    assert (out.tolist() == [4.0, 1.1, 3.3])
def _seed_dataset_transform(transform, seed=None):
    """Recursively seed a dataset transform.

    Compose containers are descended into; any transform exposing a
    ``seed`` method gets ``seed(seed=...)`` called on it.
    """
    if isinstance(transform, Compose):
        # seed every child of the composite transform
        for child in transform.transforms:
            _seed_dataset_transform(child, seed=seed)
    elif hasattr(transform, 'seed'):
        transform.seed(seed=seed)
def modification_time(representation_list):
    """Return the latest 'modified' timestamp among the given representations."""
    timestamps = [rep['modified'] for rep in representation_list]
    return max(timestamps)
class FPN(tf.keras.layers.Layer):
    """Feature Pyramid Network head.

    Builds lateral 1x1 convs for backbone levels [min_level,
    backbone_max_level], top-down fusion between adjacent levels, and 3x3
    output convs for every pyramid level [min_level, max_level]; levels above
    backbone_max_level are produced by stride-2 convs chained off the
    previous pyramid level.  All per-level layers are keyed by the level
    number as a string.
    """

    def __init__(self, filters=256, min_level=3, max_level=7, backbone_max_level=5, fusion_mode=None, conv_2d_op_params=None, normalization_op_params=None, activation_fn=None, **kwargs):
        # activation_fn is required: it is instantiated per-level below for
        # the extra (> backbone_max_level) pyramid levels.
        if (activation_fn is None):
            raise ValueError('`activation_fn` cannot be None')
        super(FPN, self).__init__(**kwargs)
        self.filters = filters
        self.min_level = min_level
        self.max_level = max_level
        self.fusion_mode = fusion_mode
        self.backbone_max_level = backbone_max_level
        normalization_op = get_normalization_op(**normalization_op_params)
        # 2x nearest-neighbour upsampling used in the top-down pathway
        self.upsample_op = functools.partial(NearestUpsampling2D, scale=2)
        self.lateral_convs = {}
        self.output_convs = {}
        self.output_norms = {}
        self.fusion_ops = {}
        self.activation_ops = {}
        # choose plain vs depthwise-separable convs (note: the config
        # attribute is spelled `use_seperable_conv` upstream)
        if (not conv_2d_op_params.use_seperable_conv):
            conv_2d_op = tf.keras.layers.Conv2D
            kernel_initializer_config = {'kernel_initializer': tf.initializers.VarianceScaling()}
        else:
            conv_2d_op = tf.keras.layers.SeparableConv2D
            kernel_initializer_config = {'depthwise_initializer': tf.initializers.VarianceScaling(), 'pointwise_initializer': tf.initializers.VarianceScaling()}
        # lateral 1x1 convs (and, above min_level, fusion ops) per backbone level
        for level in range(min_level, (backbone_max_level + 1)):
            level = str(level)
            self.lateral_convs[level] = conv_2d_op(filters=self.filters, kernel_size=1, strides=1, padding='same', name=(('l' + str(level)) + '-conv2d'), **kernel_initializer_config)
            if (int(level) != min_level):
                # fuses level-1's lateral output with this level's upsampling
                self.fusion_ops[level] = FeatureFusion(mode=fusion_mode, filters=filters, name=((('fusion-l' + str((int(level) - 1))) + '-m') + level))
        # output conv + norm per pyramid level; stride 2 above the backbone
        # so each extra level halves the spatial resolution
        for level in range(min_level, (max_level + 1)):
            level = str(level)
            self.output_norms[level] = normalization_op(name=(('p' + str(level)) + '-batch_normalization'))
            self.output_convs[level] = conv_2d_op(filters=self.filters, kernel_size=3, padding='same', strides=(2 if (int(level) > backbone_max_level) else 1), use_bias=conv_2d_op_params.use_bias_before_bn, name=(('p' + str(level)) + '-conv2d'), **kernel_initializer_config)
        # activations applied between consecutive extra levels in call()
        for level in range((backbone_max_level + 1), max_level):
            level = str(level)
            self.activation_ops[level] = activation_fn(name='p{}'.format(level))

    def call(self, features, training=None):
        outputs = {}
        # 1) lateral projections of the backbone features
        for level in range(self.min_level, (self.backbone_max_level + 1)):
            level = str(level)
            conv_layer = self.lateral_convs[level]
            outputs[level] = conv_layer(features[level])
        # 2) top-down pathway: upsample each level and fuse into the one below
        for level in range(self.backbone_max_level, self.min_level, (- 1)):
            level = str(level)
            name = 'm{}-upsample'.format(level)
            outputs[str((int(level) - 1))] = self.fusion_ops[level]([outputs[str((int(level) - 1))], self.upsample_op(name=name)(outputs[level])])
        # 3) output convs; extra levels chain off the previous pyramid level
        for level in range(self.min_level, (self.max_level + 1)):
            level = str(level)
            if (int(level) <= self.backbone_max_level):
                outputs[level] = self.output_convs[level](outputs[level])
            elif (int(level) == (self.backbone_max_level + 1)):
                # first extra level: stride-2 conv directly on the top level
                outputs[level] = self.output_convs[level](outputs[str((int(level) - 1))])
            else:
                # later extra levels: activation, then stride-2 conv
                prev_level_output = self.activation_ops[str((int(level) - 1))](outputs[str((int(level) - 1))])
                outputs[level] = self.output_convs[level](prev_level_output)
        # 4) per-level normalization
        for level in range(self.min_level, (self.max_level + 1)):
            level = str(level)
            outputs[level] = self.output_norms[level](outputs[level], training=training)
        return outputs
def test_checkpoint_hook_register(tmpdir):
    """A class registered with checkpoint hooks saves and recovers its state;
    classes with wrongly-shaped hook methods raise TypeError at definition."""
    from speechbrain.utils.checkpoints import register_checkpoint_hooks
    from speechbrain.utils.checkpoints import mark_as_saver
    from speechbrain.utils.checkpoints import mark_as_loader
    from speechbrain.utils.checkpoints import Checkpointer

    # NOTE(review): the bare `_checkpoint_hooks` / `_as_saver` / `_as_loader`
    # lines below look like decorators whose name prefixes were lost in
    # extraction (`@register_checkpoint_hooks`, `@mark_as_saver`,
    # `@mark_as_loader`); restore from the original file.
    _checkpoint_hooks
    class CustomRecoverable():
        def __init__(self, param):
            self.param = int(param)

        _as_saver
        def save(self, path):
            with open(path, 'w') as fo:
                fo.write(str(self.param))

        _as_loader
        def load(self, path, end_of_epoch, device):
            del end_of_epoch
            with open(path) as fi:
                self.param = int(fi.read())

    # round-trip: save with param=1, change it, recover the saved value
    recoverable = CustomRecoverable(1.0)
    checkpointer = Checkpointer(tmpdir, {'recoverable': recoverable})
    checkpointer.save_checkpoint()
    recoverable.param = 2.0
    checkpointer.recover_if_possible()
    assert (recoverable.param == 1.0)

    # loader missing the `device` parameter must be rejected
    with pytest.raises(TypeError):
        class BadRecoverable():
            def __init__(self, param):
                self.param = int(param)

            def save(self, path):
                with open(path, 'w') as fo:
                    fo.write(str(self.param))

            _as_loader
            def load(self, path, end_of_epoch):
                del end_of_epoch
                with open(path) as fi:
                    self.param = int(fi.read())

    # saver with an extra required argument must be rejected
    with pytest.raises(TypeError):
        class BadRecoverable():
            def __init__(self, param):
                self.param = int(param)

            _as_saver
            def save(self, path, extra_arg):
                with open(path, 'w') as fo:
                    fo.write(str(self.param))

            def load(self, path, end_of_epoch, device):
                del end_of_epoch
                with open(path) as fi:
                    self.param = int(fi.read())
def random_topology_func(op_names, max_nodes=4):
    """Build a sampler of random cell topologies.

    Args:
        op_names: candidate operation names to choose from.
        max_nodes: number of nodes in the cell; node i (1-based) receives one
            randomly-chosen op from each predecessor j < i.

    Returns:
        A zero-argument closure producing a random CellStructure.
    """
    def random_architecture():
        genotypes = []
        for i in range(1, max_nodes):
            xlist = []
            for j in range(i):
                # pick a random op for the edge j -> i
                # (removed the unused `node_str` local that was never read)
                op_name = random.choice(op_names)
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return CellStructure(genotypes)
    return random_architecture
def number_of_arguments(func):
    """Count the parameters still unbound on ``func``.

    For ``functools.partial`` objects, the positional and keyword arguments
    already bound are subtracted from the wrapped callable's parameter count.
    """
    if not isinstance(func, functools.partial):
        return len(inspect.signature(func).parameters)
    already_bound = len(func.args) + len(func.keywords)
    return len(inspect.signature(func.func).parameters) - already_bound
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
    """Global 2D pooling over the full spatial extent of ``x``.

    pool_type:
        'avg'     — average pooling (default; unknown types fall back here
                    after printing a warning)
        'max'     — max pooling
        'avgmax'  — mean of the average and max pools
        'avgmaxc' — average and max pools concatenated on the channel dim
    """
    kernel = (x.size(2), x.size(3))
    if pool_type == 'avgmaxc':
        pooled = [
            F.avg_pool2d(x, kernel_size=kernel, padding=padding, count_include_pad=count_include_pad),
            F.max_pool2d(x, kernel_size=kernel, padding=padding),
        ]
        return torch.cat(pooled, dim=1)
    if pool_type == 'avgmax':
        pooled_avg = F.avg_pool2d(x, kernel_size=kernel, padding=padding, count_include_pad=count_include_pad)
        pooled_max = F.max_pool2d(x, kernel_size=kernel, padding=padding)
        return 0.5 * (pooled_avg + pooled_max)
    if pool_type == 'max':
        return F.max_pool2d(x, kernel_size=kernel, padding=padding)
    if pool_type != 'avg':
        print(('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type))
    return F.avg_pool2d(x, kernel_size=kernel, padding=padding, count_include_pad=count_include_pad)
def test_set_log_level(caplog):
    """Messages at or above the configured level reach caplog; lower ones don't.

    issue_messages() emits the markers 'abc'..'mno' at increasing severities,
    so raising the level hides one more leading marker each time.
    """
    markers = ['abc', 'def', 'ghi', 'jkl', 'mno']
    # (log level, number of leading markers that must be suppressed)
    cases = [
        (cashocs.LogLevel.DEBUG, 0),
        (cashocs.LogLevel.INFO, 1),
        (cashocs.LogLevel.WARNING, 2),
        (cashocs.LogLevel.ERROR, 3),
        (cashocs.LogLevel.CRITICAL, 4),
    ]
    for level, hidden_count in cases:
        cashocs.set_log_level(level)
        issue_messages()
        if (fenics.MPI.rank(fenics.MPI.comm_world) == 0):
            for marker in markers[:hidden_count]:
                assert (not (marker in caplog.text))
            for marker in markers[hidden_count:]:
                assert (marker in caplog.text)
        fenics.MPI.barrier(fenics.MPI.comm_world)
        caplog.clear()
class ScliteJob(Job):
    """Sisyphus job that scores hypotheses against references with SCTK's
    sclite tool by converting both from Python-dict text files into STM/CTM.

    NOTE(review): ``create_stm``/``create_ctm`` take no ``self``/``cls`` but
    are invoked as ``self.create_stm(...)`` in ``run`` — ``@staticmethod``
    decorators were probably lost in extraction (as plain methods these calls
    would mis-bind).  Confirm against the original file.
    """

    def __init__(self, name, refs, hyps):
        # name: corpus label written into the STM LABEL header
        # refs/hyps: paths to files containing a Python dict literal
        #            {seq_tag: text}
        self.name = name
        self.refs = refs
        self.hyps = hyps
        self.output_sclite_dir = self.output_path('sclite-out', directory=True)

    def create_stm(name, source_filename, target_filename):
        """Write references as STM, one synthetic 1-second segment per sequence."""
        # WARNING: eval() on file contents — only safe for trusted inputs.
        py_txt = eval(generic_open(source_filename).read())
        assert (isinstance(py_txt, dict) and (len(py_txt) > 0))
        (example_key, example_value) = next(iter(py_txt.items()))
        assert (isinstance(example_key, str) and isinstance(example_value, str))
        with generic_open(target_filename, 'w') as f:
            f.write(';; CATEGORY "0" "" ""\n')
            f.write((';; LABEL "O" "%s" ""\n' % name))
            start = 0.0
            for (seq_tag, raw_txt) in sorted(py_txt.items()):
                # each utterance occupies [start+0.01, start+0.99]
                f.write(('%s 1 rec %f %f <O> %s\n' % (seq_tag, (start + 0.01), (start + 0.99), raw_txt)))
                start += 1
        return target_filename

    def create_ctm(source_filename, target_filename):
        """Write hypotheses as CTM, spreading words evenly over each segment."""
        # WARNING: eval() on file contents — only safe for trusted inputs.
        py_txt = eval(generic_open(source_filename).read())
        assert (isinstance(py_txt, dict) and (len(py_txt) > 0))
        (example_key, example_value) = next(iter(py_txt.items()))
        assert (isinstance(example_key, str) and isinstance(example_value, str))
        with generic_open(target_filename, 'w') as f:
            f.write(';; <name> <track> <start> <duration> <word> <confidence> [<n-best>]\n')
            start = 0.0
            for (seq_tag, raw_txt) in sorted(py_txt.items()):
                f.write((';; %s (%f-%f)\n' % (seq_tag, (start + 0.01), (start + 0.99))))
                if raw_txt:
                    # split the 0.9s segment evenly across the words
                    words = raw_txt.split()
                    word_duration = (0.9 / len(words))
                    for i in range(len(words)):
                        f.write(('%s 1 %f %f %s\n' % (seq_tag, ((start + 0.01) + (i * word_duration)), word_duration, words[i])))
                start += 1
        return target_filename

    def run(self):
        # Convert inputs, invoke sclite, and echo its (filtered) output.
        stm_filename = self.create_stm(self.name, self.refs.get_path(), 'refs.stm')
        ctm_filename = self.create_ctm(self.hyps.get_path(), 'hyps.ctm')
        args = [('%s/SCTK/bin/sclite' % tk.gs.BASE_DIR), '-r', stm_filename, 'stm', '-h', ctm_filename, 'ctm', '-e', 'utf-8', '-o', 'all', '-o', 'dtl', '-o', 'lur', '-n', 'sclite', '-O', self.output_sclite_dir.get_path()]
        print(('$ %s' % ' '.join(args)))
        for sclite_stdout_line in subprocess.check_output(args).splitlines():
            if (not sclite_stdout_line.strip()):
                continue
            # suppress sclite's per-file progress chatter
            if (b'Performing alignments for file' in sclite_stdout_line):
                continue
            if (b'Segments For Channel' in sclite_stdout_line):
                continue
            print(sclite_stdout_line.decode('utf8'))
        # NOTE(review): unconditional — the job always fails here even after
        # sclite succeeds; looks unfinished or deliberately disabled. Confirm.
        raise NotImplementedError

    def tasks(self):
        (yield Task('run', rqmt={'cpu': 1, 'mem': 1, 'time': 0.1}, mini_task=True))
def from_dr_metadata(d: dr.Metadata) -> Metadata:
    """Convert a ``dr.Metadata`` message into the local ``Metadata`` type."""
    converted_fields = [from_dr_field(field) for field in d.fields]
    converted_kernels = [from_dr_kernel(kernel) for kernel in d.kernels]
    # A capability whose value is 1 is a plain flag; anything else is
    # rendered as an explicit "key=value" requirement string.
    required_caps = [
        cap.key if (cap.value == 1) else f'{cap.key}={cap.value}'
        for cap in d.required_caps
    ]
    return Metadata(converted_fields, converted_kernels, required_caps, d.root_buffer_size)
class Compare(Expr):
    """A chained comparison node (e.g. ``a < b <= c``): a left expression
    followed by a list of (operator, operand) pairs."""
    fields = ('expr', 'ops')

    def as_const(self, eval_ctx=None):
        """Constant-fold the comparison chain; raise Impossible on failure."""
        eval_ctx = get_eval_context(self, eval_ctx)
        left = self.expr.as_const(eval_ctx)
        outcome = left
        try:
            for operand in self.ops:
                right = operand.expr.as_const(eval_ctx)
                outcome = _cmpop_to_func[operand.op](left, right)
                if not outcome:
                    # Chained comparisons short-circuit on the first falsy link.
                    return False
                left = right
        except Exception:
            raise Impossible()
        return outcome
def square_matrix(x):
    """Check that *x* is a rank-2 tensor with equal dimensions.

    Returns a ``(passed, message)`` pair; *message* is ``None`` on success.
    """
    assert_tensor(x)
    shape = x.get_shape()
    is_square = (len(shape) == 2) and (shape[0] == shape[1])
    if is_square:
        return (True, None)
    return (False, f'expected a square matrix, got shape {shape}')
class Ex2Job(IndependentJob):
    """One (problem, repetition, parameter) work unit of experiment 2.

    NOTE(review): reads the module-level globals ``sample_size``,
    ``tr_proportion``, ``alpha`` and ``ex`` — confirm they are defined in the
    enclosing experiment script before scheduling.
    """

    def __init__(self, aggregator, p, data_source, prob_label, rep, job_func, prob_param):
        # Just under one hour, presumably to fit short-queue walltime limits.
        walltime = (60 * 59)
        # Heuristic memory request scaled by the training-set size (plus headroom).
        memory = (int(((tr_proportion * sample_size) * 0.01)) + 50)
        IndependentJob.__init__(self, aggregator, walltime=walltime, memory=memory)
        self.p = p
        self.data_source = data_source
        self.prob_label = prob_label
        self.rep = rep
        self.job_func = job_func
        self.prob_param = prob_param

    def compute(self):
        """Sample data, run ``job_func`` on a train/test split, submit and save the result."""
        p = self.p
        data_source = self.data_source
        r = self.rep
        prob_param = self.prob_param
        job_func = self.job_func
        # Seed with the repetition index so each rep is reproducible.
        data = data_source.sample(sample_size, seed=r)
        with util.ContextTimer() as t:
            # +21 decorrelates the split seed from the sampling seed.
            (tr, te) = data.split_tr_te(tr_proportion=tr_proportion, seed=(r + 21))
            prob_label = self.prob_label
            logger.info(('computing. %s. prob=%s, r=%d, param=%.3g' % (job_func.__name__, prob_label, r, prob_param)))
            job_result = job_func(p, data_source, tr, te, r)
            result = SingleResult(job_result)
            # Hand the result back to the aggregator for collection.
            self.aggregator.submit_result(result)
            func_name = job_func.__name__
        logger.info(('done. ex2: %s, prob=%s, r=%d, param=%.3g. Took: %.3g s ' % (func_name, prob_label, r, prob_param, t.secs)))
        # Also persist the raw result to disk under a self-describing filename.
        fname = ('%s-%s-n%d_r%d_p%g_a%.3f_trp%.2f.p' % (prob_label, func_name, sample_size, r, prob_param, alpha, tr_proportion))
        glo.ex_save_result(ex, job_result, prob_label, fname)
class MockCmdLineArgs():
    """Stand-in for parsed command-line arguments, used in tests.

    NOTE(review): ``dataset_path`` must exist at module level — confirm.
    """
    quiet = True            # suppress verbose output
    MODEL = 'name'          # model identifier
    path = dataset_path     # dataset location (module-level global)
    path_labels = None      # no separate labels file
    label = 'folder'        # labels derived from folder structure
    port = 0                # 0 — let the system pick a free port, presumably
def save_in_word2vec_format(vecs: np.ndarray, words: np.ndarray, fname: str):
    """Write ``(word, vector)`` pairs to *fname* in textual word2vec format.

    The header line holds the vocabulary size and the vector dimensionality.
    Fix: the dimensionality is now taken from ``vecs`` instead of being
    hard-coded to 300, which produced a corrupt header for any other size.
    """
    # Support both a 2-D array and a sequence of 1-D vectors.
    dim = vecs.shape[1] if getattr(vecs, 'ndim', 0) > 1 else len(vecs[0])
    with open(fname, 'w', encoding='utf-8') as f:
        f.write(f'{len(vecs)} {dim}\n')
        # tqdm_notebook shows progress; the loop index was unused, so plain zip.
        for (v, w) in tqdm.tqdm_notebook(zip(vecs, words)):
            vec_as_str = ' '.join(str(x) for x in v)
            f.write(f'{w} {vec_as_str}\n')
class PylayersGUI(HasTraits):
    """TraitsUI/Mayavi GUI for configuring and running a PyLayers radio link.

    NOTE(review): this class depends on module-level globals (``DL``,
    ``basename``, ``av_wstds``, ``std``, ``mlab``, ``geu``, ...) and runs side
    effects at class-definition time (directory listings, showing a Jupyter
    widget). The bare ``_trait_change('...')`` statements interleaved below
    look like stripped ``@on_trait_change('...')`` decorators for the method
    that follows each of them — confirm against the upstream source.
    """
    # --- available layout files and antenna files -------------------------
    laynames = ([''] + np.sort(os.listdir((basename + '/struc/lay/'))).tolist())
    Lay_Enum = Enum(laynames)
    av_ant = ['Omni', 'Gauss', 'aperture']
    antext = ['vsh3', 'sh3']
    for fname in os.listdir((basename + '/ant')):
        if (fname.split('.')[(- 1)] in antext):
            av_ant.append(fname)
    # --- scene bounding box, taken from the current layout ----------------
    xmin = DL.L.ax[0]
    xmax = DL.L.ax[1]
    ymin = DL.L.ax[2]
    ymax = DL.L.ax[3]
    zmin = 0.0
    zmax = (DL.L.maxheight - 0.1)
    # --- antenna "a": position sliders (centered), Euler angles, file -----
    aX = Range(low='xmin', high='xmax', value=float((xmin + (xmax / 2.0))))
    aY = Range(low='ymin', high='ymax', value=float((ymin + (ymax / 2.0))))
    aZ = Range(low='zmin', high='zmax', value=float((zmin + (zmax / 2.0))))
    agamma = Range(float((- 3.14)), float(3.14), 0.0)
    abeta = Range(float((- 3.14)), float(3.14), 0.0)
    aalpha = Range(float((- 3.14)), float(3.14), 0.0)
    a_ant = Enum(av_ant)
    # --- antenna "b": same controls ---------------------------------------
    bX = Range(low='xmin', high='xmax', value=float((xmin + (xmax / 2.0))))
    bY = Range(low='ymin', high='ymax', value=float((ymin + (ymax / 2.0))))
    bZ = Range(low='zmin', high='zmax', value=float((zmin + (zmax / 2.0))))
    bgamma = Range(float((- 3.14)), float(3.14), 0.0)
    bbeta = Range(float((- 3.14)), float(3.14), 0.0)
    balpha = Range(float((- 3.14)), float(3.14), 0.0)
    b_ant = Enum(av_ant)
    # --- frequency range (GHz), initialized from antenna a ----------------
    fmmin = 0.0
    fmmax = 300.0
    fmin = Range(low='fmmin', high='fmmax', value=float(DL.Aa.fGHz[0]))
    fmax = Range(low='fmmin', high='fmmax', value=float(DL.Aa.fGHz[(- 1)]))
    fstep = Range(low=0, high=10, value=0)
    # --- 3D scene ----------------------------------------------------------
    scene = Instance(MlabSceneModel, ())
    plot = Instance(PipelineBase)

    # NOTE(review): stripped decorator? (see class docstring)
    _trait_change('Lay_Enum')
    def update_L(self):
        """Reload the layout when the layout file selection changes."""
        if (self.Lay_Enum != ' '):
            mlab.clf()
            DL.L = Layout(self.Lay_Enum, bgraphs=True)
            # Refresh the bounding box from the new layout.
            self.xmin = DL.L.ax[0]
            self.xmax = DL.L.ax[1]
            self.ymin = DL.L.ax[2]
            self.ymax = DL.L.ax[3]
            self.zmin = 0.0
            self.zmax = (DL.L.maxheight - 0.1)
            (self.aX, self.aY, self.aZ) = DL.a
            (self.bX, self.bY, self.bZ) = DL.b
            DL.a = np.array([self.aX, self.aY, self.aZ])
            DL.b = np.array([self.bX, self.bY, self.bZ])
            self.cutoff = DL.cutoff
            if (not hasattr(DL, '_maya_fig')):
                DL._show3()

    # NOTE(review): stripped decorator?
    _trait_change('cutoff,threshold')
    def update_cutoff_threshold(self):
        """Push cutoff/threshold slider values into the link object."""
        DL.cutoff = self.cutoff
        # threshold slider is in percent; the link expects a fraction.
        DL.threshold = (self.threshold / 100.0)

    # NOTE(review): stripped decorator?
    _trait_change('aX,aY,aZ')
    def update_a(self):
        """Move antenna a to the slider position."""
        self.clear_fig()
        DL.a = np.array([self.aX, self.aY, self.aZ])
        self.cutoff = DL.cutoff

    # NOTE(review): stripped decorator?
    _trait_change('bX,bY,bZ')
    def update_b(self):
        """Move antenna b to the slider position."""
        self.clear_fig()
        DL.b = np.array([self.bX, self.bY, self.bZ])
        self.cutoff = DL.cutoff

    # NOTE(review): stripped decorator?
    _trait_change('aalpha,abeta,agamma')
    def update_Ta(self):
        """Recompute antenna a's rotation matrix from its Euler angles."""
        T = geu.MEulerAngle(self.aalpha, beta=self.abeta, gamma=self.agamma)
        DL.Ta = T
        self.clear_fig()

    # NOTE(review): stripped decorator?
    _trait_change('balpha,bbeta,bgamma')
    def update_Tb(self):
        """Recompute antenna b's rotation matrix from its Euler angles."""
        T = geu.MEulerAngle(self.balpha, beta=self.bbeta, gamma=self.bgamma)
        DL.Tb = T
        self.clear_fig()

    # NOTE(review): stripped decorator?
    _trait_change('a_ant,fmin,fmax,fstep')
    def update_Aa(self):
        """Swap in the newly selected antenna model for side a."""
        DL.Aa = Antenna(self.a_ant)
        self.clear_fig()

    # NOTE(review): stripped decorator?
    _trait_change('b_ant,fmin,fmax,fstep')
    def update_Ab(self):
        """Swap in the newly selected antenna model for side b."""
        DL.Ab = Antenna(self.b_ant)
        self.clear_fig()

    # NOTE(review): stripped decorator?
    _trait_change('fmin,fmax,fstep,chann')
    def update_fGHz(self):
        """Derive the frequency axis from the chosen wireless standard/channel,
        or from the manual fmin/fmax/fstep sliders when no standard is set."""
        if (self.Wstd_Enum != 'None'):
            W = std.Wstandard(self.Wstd_Enum)
            # NOTE(review): self.chann is eval'ed as a channel key — confirm
            # it is always a trusted GUI-provided value.
            Wchan = W.chan[eval(self.chann)]
            fcGHz = Wchan['fcGHz']
            BWGHz = Wchan['BMHz']
            GMHz = Wchan['GMHz']
            fGHz = Wchan.fghz
            DL.fGHz = np.array([fcGHz])
            self.BWGHz = BWGHz
            self.fmin = float(fGHz[0])
            self.fmax = float(fGHz[(- 1)])
            self.fstep = float((fGHz[1] - fGHz[0]))
        else:
            if (self.fmin < self.fmax):
                DL.fGHz = np.arange(self.fmin, self.fmax, self.fstep)
            elif (self.fmin == self.fmax):
                DL.fGHz = np.array([self.fmin])
            self.BWGHz = 5

    # NOTE(review): stripped decorator?
    _trait_change('Beval')
    def DLeval(self):
        """Run the ray-tracing evaluation and refresh all result plots."""
        DL.eval(verbose=False, force=self.force, cutoff=self.cutoff, threshold=(self.threshold / 100.0), diffraction=self.diffraction, nD=self.nD, nT=self.nT, nR=self.nR, applywav=self.applywav)
        DL._update_show3(delrays=True)
        ER = np.squeeze(DL.H.energy())
        DL.R._show3(ER=ER)
        self.plt_all()

    def plt_all(self):
        """Redraw every result figure."""
        self.plt_cir()
        self.plt_doa()
        self.plt_dod()
        self.plt_dspread()
        self.plt_aspread()

    def plt_cir(self):
        """Plot the channel impulse response."""
        self.figcir.clf()
        ax = self.figcir.add_subplot(111)
        DL.plt_cir(fig=self.figcir, ax=ax, BWGHz=self.BWGHz, Nf=5000)
        self.figcir.canvas.draw()

    def plt_doa(self):
        """Plot the direction-of-arrival diagram (polar)."""
        self.figdoa.clf()
        ax = self.figdoa.add_subplot(111, polar=True)
        DL.plt_doa(polar=True, fig=self.figdoa, ax=ax)
        self.figdoa.canvas.draw()

    def plt_dod(self):
        """Plot the direction-of-departure diagram (polar)."""
        self.figdod.clf()
        ax = self.figdod.add_subplot(111, polar=True)
        DL.plt_dod(polar=True, fig=self.figdod, ax=ax)
        self.figdod.canvas.draw()

    def plt_dspread(self):
        """Plot the delay spread."""
        self.figds.clf()
        ax = self.figds.add_subplot(111)
        DL.plt_dspread(fig=self.figds, ax=ax)
        self.figds.canvas.draw()

    def plt_aspread(self):
        """Plot the angular spread."""
        self.figas.clf()
        ax = self.figas.add_subplot(111)
        DL.plt_aspread(fig=self.figas, ax=ax)
        self.figas.canvas.draw()

    def clear_fig(self, lf=['cir', 'doa', 'dod', 'as', 'ds']):
        """Clear the listed result figures.

        NOTE(review): mutable default list and eval-built attribute access are
        kept as-is; figures are only read, so the shared default is harmless.
        """
        for f in lf:
            eval((('self.fig' + f) + '.clf()'))
            eval((('self.fig' + f) + '.canvas.draw()'))

    # --- TraitsUI view construction (class-level) --------------------------
    render3d = Item('scene', editor=SceneEditor(scene_class=Scene), height=500, width=1500, show_label=False)
    GLay = Group(Item('Lay_Enum', style='simple', label='file'), show_labels=False, label='Layout')
    Wstd_Enum = Enum('None', av_wstds)
    chann = Str
    GWstd_None = Group(Item('fmin', label='fGHz min', style='text'), Item('fmax', label='fGHz max', style='text'), Item('fstep', label='fGHz step', style='text'), label='Frequency', show_border=True, enabled_when="Wstd_Enum == 'None'")
    GWstd_std = Group(Item(name='chann', editor=EnumEditor(name='handler.channels')), label='channel', show_border=True, enabled_when="Wstd_Enum != 'None'")
    GWstd = Group(Group(Item(name='Wstd_Enum', label='Wireless Standard')), GWstd_None, GWstd_std, label='Wireless Standard', show_labels=True, show_border=False)
    xmin = Float
    xmax = Float
    ymin = Float
    ymax = Float
    zmin = Float
    zmax = Float
    Iax = Item('aX', editor=RangeEditor(low_name='xmin', high_name='xmax', format='%.1f', label_width=28, mode='auto'), label='x')
    Iay = Item('aY', editor=RangeEditor(low_name='ymin', high_name='ymax', format='%.1f', label_width=28, mode='auto'), label='y')
    Iaz = Item('aZ', editor=RangeEditor(low_name='zmin', high_name='zmax', format='%.1f', label_width=28, mode='auto'), label='z')
    GPos_a = VGroup(Iax, Iay, Iaz, id='a', label='Position', show_border=True, show_labels=True, layout='split')
    Ifile_a = Item('a_ant', label='file')
    GRot_a = VGroup(Item('agamma', label='x-roll'), Item('abeta', label='y-roll'), Item('aalpha', label='z-roll'), id='Ta', label='Rotation', show_border=True, layout='split')
    G_a = Group(Ifile_a, GPos_a, GRot_a, label='Antenna a', show_border=False)
    Ibx = Item('bX', editor=RangeEditor(low_name='xmin', high_name='xmax', format='%.1f', label_width=28, mode='auto'), label='x')
    Iby = Item('bY', editor=RangeEditor(low_name='ymin', high_name='ymax', format='%.1f', label_width=28, mode='auto'), label='y')
    Ibz = Item('bZ', editor=RangeEditor(low_name='zmin', high_name='zmax', format='%.1f', label_width=28, mode='auto'), label='z')
    GPos_b = Group(Ibx, Iby, Ibz, id='b', label='Position', show_border=True, layout='split')
    Ifile_b = Item('b_ant', label='file')
    GRot_b = Group(Item('bgamma', label='x-roll'), Item('bbeta', label='y-roll'), Item('balpha', label='z-roll'), id='Tb', label='Rotation', show_border=True, layout='split')
    G_b = Group(Ifile_b, GPos_b, GRot_b, label='Antenna b', show_border=False)
    force = Bool
    diffraction = Bool
    applywav = Bool
    applywav = Bool  # NOTE(review): duplicate of the line above — harmless but redundant
    low_cutoff = 1
    high_cutoff = 30
    cutoff = Range(low='low_cutoff', high='high_cutoff', value=DL.cutoff)
    threshold = Range(0, 100, 80)
    nD = 2
    nR = 10
    nT = 10
    G_advanced = Group(VGroup(Item('force', label='force', resizable=False, style='simple'), Item('cutoff', label='cutoff', editor=RangeEditor(low_name='low_cutoff', high_name='high_cutoff', label_width=28, mode='auto'), width=0.2, style='simple'), Item('threshold', label='threshold', width=0.2, style='simple'), Item('diffraction', label='diffractions', style='simple'), Item('nD', label='max nb Diffractions', enabled_when='diffraction', style='simple'), Item('nR', label='max nb Reflections', style='simple'), Item('nT', label='max nb Transmissions', style='simple'), Item('applywav', label='applywav', style='simple'), label='Ray Tracing Configuration', show_labels=True, show_border=False))
    Beval = Button('Launch Ray-Tracing')
    GLeft = Group(GLay, GWstd, G_advanced)
    GAnt_ab = HGroup(spring, G_a, spring, G_b, spring)
    GAnt_Eval = Group(GAnt_ab, HGroup(spring, Item('Beval', enabled_when="Lay_Enum != ''"), show_labels=False))
    GR_0 = HSplit(GLeft, render3d, layout='split')
    figcir = Instance(Figure(figsize=(8, 20)), ())
    figdoa = Instance(Figure(figsize=(8, 20)), ())
    figdod = Instance(Figure(figsize=(8, 20)), ())
    figas = Instance(Figure(figsize=(8, 20)), ())
    figds = Instance(Figure(figsize=(8, 20)), ())
    GExploit = Group(Group(Item('figcir', editor=MPLFigureEditor()), label='CIR'), Group(Item('figdoa', editor=MPLFigureEditor()), label='DOA'), Group(Item('figdod', editor=MPLFigureEditor()), label='DOD'), Group(Item('figas', editor=MPLFigureEditor()), label='Ang. Spread'), Group(Item('figds', editor=MPLFigureEditor()), label='Delay Spread'), layout='tabbed')
    GR_1 = HGroup(spring, GAnt_Eval, spring, GExploit)
    # NOTE(review): showing the Jupyter widget at class-definition time is a
    # side effect of importing this module — confirm this is intended.
    JWidget = JupyterWidget()
    JWidget.show()
    view = View(VGroup(GR_0, GR_1), buttons=['Quit'], title='Pylayers GUI - beta', resizable=True, width=1.0, height=1.0, handler=WstdHandler)
def test_all_gemm(operation: 'GemmOperationUniversal', testcase='universal'):
    """Sweep a grid of problem sizes/modes for one GEMM operation and run each.

    Returns True when every launched configuration passes. ``testcase`` selects
    the sweep: 'universal' (default), 'interleaved', or 'multistage'.
    """
    passed = True
    # Memory alignment requirement, derived from the narrower operand type.
    minimum_operand_element_size = min(DataTypeSize[operation.A.element], DataTypeSize[operation.B.element])
    opcode_class = operation.tile_description.math_instruction.opcode_class
    if (opcode_class == cutlass.OpClass.Simt):
        # SIMT kernels have no vectorized-access alignment constraint.
        alignment = 1
    else:
        alignment = (128 // minimum_operand_element_size)
    # int8 SIMT kernels need 4-element alignment along specific dimensions,
    # depending on each operand's layout.
    if ((opcode_class == cutlass.OpClass.Simt) and (operation.A.element == cutlass.int8) and (operation.A.layout == cutlass.ColumnMajor)):
        alignment_m = 4
    else:
        alignment_m = alignment
    if ((opcode_class == cutlass.OpClass.Simt) and (operation.B.element == cutlass.int8) and (operation.A.layout == cutlass.RowMajor)):
        alignment_n = 4
    else:
        alignment_n = alignment
    if ((opcode_class == cutlass.OpClass.Simt) and (operation.A.element == cutlass.int8) and (operation.B.element == cutlass.int8) and ((operation.A.layout == cutlass.RowMajor) or (operation.B.layout == cutlass.ColumnMajor))):
        alignment_k = 4
    else:
        alignment_k = alignment
    threadblock_k = operation.tile_description.threadblock_shape[2]
    if (testcase == 'interleaved'):
        if (operation.A.layout in [cutlass.ColumnMajorInterleaved32, cutlass.RowMajorInterleaved32]):
            interleavedk = 32
        else:
            raise ValueError('Unknown layout')
    # Build the sweep grid for the selected test case.
    if (testcase == 'interleaved'):
        modes = [cutlass.gemm.Mode.Gemm]
        problem_size_m = [interleavedk, (512 + interleavedk)]
        problem_size_n = [interleavedk, (512 + interleavedk)]
        problem_size_k = [interleavedk, ((threadblock_k * operation.tile_description.stages) + interleavedk)]
        problem_alpha = [1.0]
        problem_beta = [0.0]
        batch_counts = [1]
    elif (testcase == 'multistage'):
        modes = [cutlass.gemm.Mode.Gemm]
        problem_size_m = [16, 528]
        problem_size_n = [16, 528]
        problem_size_k = [threadblock_k, ((threadblock_k * operation.tile_description.stages) + operation.tile_description.math_instruction.instruction_shape[2])]
        problem_alpha = [1.0]
        problem_beta = [0.0]
        batch_counts = [1]
    else:
        # 'universal': also exercise batching and (pre-SM90) parallel split-K.
        modes = [cutlass.gemm.Mode.Gemm]
        batch_counts = [1, 2, 3, 5, 7]
        if (operation.arch < 90):
            modes.append(cutlass.gemm.Mode.GemmSplitKParallel)
        problem_size_m = [alignment_m, (512 - (3 * alignment_m))]
        problem_size_n = [alignment_n, (512 - (2 * alignment_n))]
        if (operation.tile_description.stages is None):
            # Auto-staged kernels: assume a deep pipeline for sizing K.
            stages_for_k_calc = 7
        else:
            stages_for_k_calc = operation.tile_description.stages
        problem_size_k = [alignment_k, ((threadblock_k * stages_for_k_calc) - alignment_k), (((threadblock_k * stages_for_k_calc) * 3) - alignment_k)]
        problem_alpha = [1.0]
        problem_beta = [2.0]
    testbed = GemmUniversalLauncher(operation, interleaved=(testcase == 'interleaved'))
    for mode in modes:
        for m in problem_size_m:
            for n in problem_size_n:
                for k in problem_size_k:
                    for batch_count in batch_counts:
                        for alpha in problem_alpha:
                            for beta in problem_beta:
                                if (testcase == 'universal'):
                                    # Skip split-K slices too small to be meaningful.
                                    if ((k // batch_count) < (2 * threadblock_k)):
                                        continue
                                problem_size = cutlass.gemm.GemmCoord(m, n, k)
                                if (operation.arch < 90):
                                    split_k_slices = batch_count
                                else:
                                    split_k_slices = 1
                                overridden_mode = mode
                                # batch_count > 1 in plain Gemm mode means batched GEMM.
                                if ((mode == cutlass.gemm.Mode.Gemm) and (batch_count > 1)):
                                    overridden_mode = cutlass.gemm.Mode.Batched
                                passed = testbed.run(overridden_mode, problem_size, batch_count, split_k_slices, alpha, beta)
                                (err,) = cudart.cudaDeviceSynchronize()
                                if (err != cuda.CUresult.CUDA_SUCCESS):
                                    raise RuntimeError(('CUDA Error %s' % str(err)))
                                if (not passed):
                                    return False
    return passed
def _quadratic_observer(x: tf.Tensor) -> Mapping[(Tag, Dataset)]:
    """Observe *x* by pairing the query points with their quadratic values."""
    observations = Dataset(x, quadratic(x))
    return {NA: observations}
def test_array():
    """An Array's .type property must agree with ak.type() and be an ArrayType."""
    arr = ak.Array(['this', {'x': ['is', 1, 2, None]}])
    assert ak.type(arr) == arr.type
    assert isinstance(arr.type, ak.types.ArrayType)
def define_D_pair(opt):
    """Instantiate the paired discriminator named in ``opt['network_D_pair']``.

    Raises NotImplementedError for unknown model names.
    """
    opt_net = opt['network_D_pair']
    which_model = opt_net['which_model_D']
    if (which_model == 'discriminator_vgg_128'):
        return SRGAN_arch.Discriminator_VGG_128(in_nc=opt_net['in_nc'], nf=opt_net['nf'])
    if (which_model == 'patchgan'):
        return NLayerDiscriminator(input_nc=opt_net['in_nc'], ndf=opt_net['nf'])
    if (which_model == 'vectorgan'):
        # Plain (ra)gan losses consume raw logits; other losses expect probabilities.
        sigmoid_needed = opt['gan_type'] not in ['gan', 'ragan']
        return VectorDiscriminator(input_nc=(2 * opt['condition_nf']), use_sigmoid=sigmoid_needed)
    raise NotImplementedError('Discriminator model [{:s}] not recognized'.format(which_model))
def shingles(text, char_ngram=5):
    """Return the set of character n-grams (shingles) of *text*.

    Fixes an off-by-one in the original ``range(0, len(text) - char_ngram)``,
    which dropped the final shingle (a text of exactly ``char_ngram``
    characters produced an empty set). Texts shorter than ``char_ngram``
    still yield the empty set.
    """
    return {text[head:head + char_ngram] for head in range(len(text) - char_ngram + 1)}
def resnetish10(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNetish:
    """ResNet-ish 10-layer model: one BasicBlock per stage.

    Fix: the original passed the architecture name ``'resnetish18'`` (a
    copy-paste slip), which would key the wrong pretrained checkpoint for
    this [1, 1, 1, 1] configuration.
    """
    return _resnetish('resnetish10', BasicBlock, [1, 1, 1, 1], pretrained, progress, **kwargs)
def slice_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, start=None, stop=None, step=None):
    """Backward pass of Slice: route the output gradient back into the
    (larger) input shape via SliceDataGrad."""
    grad_output = grad_inputs[0]
    ctx = nn.get_current_context()
    backward_fn = SliceDataGrad(ctx, start, stop, step)
    # The grad function needs the forward input's shape to position the slice.
    backward_fn.xshape = input_shapes[0]
    return backward_fn(grad_output)
def zero_one_loss_calc(TP, POP):
    """Zero-one loss: population size minus the total of true positives.

    :param TP: dict mapping class -> true-positive count
    :param POP: population size
    :return: numeric loss, or the string 'None' when it cannot be computed
             (mirroring the library-wide convention for incomputable stats).
    """
    try:
        return POP - sum(TP.values())
    except Exception:
        return 'None'
def semseg_png(score, dataset=None, img_info=None, output_folder=None, semseg=None, target=None):
    """Save a semantic-segmentation prediction (and, if given, its ground
    truth) as uint8 PNGs under *output_folder*."""
    pred_dir = os.path.join(output_folder, 'semseg_pres')
    os.makedirs(pred_dir, exist_ok=True)
    im_name = img_info['file_name']
    extra_fields = dataset.extra_fields
    # name_trans maps the image extension to the label extension.
    if 'name_trans' in extra_fields:
        name_trans = extra_fields['name_trans']
    else:
        name_trans = ['jpg', 'png']
    png_name = im_name.replace(name_trans[0], name_trans[1])
    cv2.imwrite(os.path.join(pred_dir, png_name), score.astype(np.uint8))
    if target is not None:
        gt_dir = os.path.join(output_folder, 'semseg_gt')
        label = target.get_field('semsegs').semseg.squeeze(0).numpy()
        os.makedirs(gt_dir, exist_ok=True)
        cv2.imwrite(os.path.join(gt_dir, png_name), label.astype(np.uint8))
class MPNetForTokenClassification():
    """Dummy placeholder raising a helpful error when PyTorch is unavailable.

    Follows the transformers "dummy object" pattern: any attempt to
    instantiate or load the model reports that PyTorch is required.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # Fix: from_pretrained is a classmethod on the real model, so the dummy
    # must be callable on the class itself; without the decorator a
    # class-level call bound the checkpoint path as ``self``.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_pytorch(cls)
def _baseset_picker(args):
    """Build (clean) train/test datasets and loaders for the base set named in
    ``args.baseset`` (CIFAR10, CIFAR100 or SVHN).

    NOTE(review): several Normalize means/stds below are all zeros — a std of
    0 divides by zero at load time, so these constants look redacted or
    placeholder; confirm the intended normalization values. Also note the
    test set is always CIFAR10 regardless of the chosen base set — confirm
    this is intended.
    """
    if (args.baseset == 'CIFAR10'):
        transform_train = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
        clean_trainset = torchvision.datasets.CIFAR10(root='~/data', train=True, download=True, transform=transform_train)
        # shuffle=False keeps sample order deterministic across epochs.
        clean_trainloader = torch.utils.data.DataLoader(clean_trainset, batch_size=128, shuffle=False, num_workers=2)
    elif (args.baseset == 'CIFAR100'):
        # NOTE(review): zero mean/std — see docstring.
        transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0., 0., 0.), (0., 0., 0.))])
        clean_trainset = torchvision.datasets.CIFAR100(root='~/data', train=True, download=True, transform=transform_train)
        clean_trainloader = torch.utils.data.DataLoader(clean_trainset, batch_size=128, shuffle=False, num_workers=2)
    elif (args.baseset == 'SVHN'):
        # NOTE(review): partially zeroed mean and zero std — see docstring.
        transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4376821, 0.4437697, 0.), (0., 0., 0.))])
        base_trainset = torchvision.datasets.SVHN(root='~/data', split='train', download=True, transform=transform_train)
        # Wrap with the label-noise loader driven by args.label_path.
        clean_trainset = _CIFAR100_label_noise(base_trainset, args.label_path)
        clean_trainloader = torch.utils.data.DataLoader(clean_trainset, batch_size=128, shuffle=False, num_workers=2)
    else:
        raise NotImplementedError
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    testset = torchvision.datasets.CIFAR10(root='~/data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
    return (clean_trainset, clean_trainloader, testset, testloader)
def recursive_redirect_lookup(redirects, word):
    """Follow *word* through the redirect mapping to its final target.

    Iterative replacement for the recursive original, which relied on
    catching RecursionError to terminate on redirect cycles (and therefore
    also silently truncated very long acyclic chains at the interpreter
    recursion limit). Cycles are now detected with a visited set: on a
    cycle the first word whose target was already visited is returned;
    acyclic chains of any length resolve to their true endpoint.
    """
    seen = set()
    while word in redirects and word not in seen:
        seen.add(word)
        word = redirects[word]
    return word
def compare_collocation(word):
    """Print the top-20 MI collocates of *word* in the two corpora.

    NOTE(review): reads the module-level globals ``corpus1``, ``corpus2`` and
    ``ct`` (corpus-toolkit) — confirm they are defined before calling.
    """
    # Mutual-information collocation statistics for each corpus.
    corp1_collocates = ct.collocator(ct.tokenize(ct.ldcorpus(corpus1)), word, stat='MI')
    corp2_collocates = ct.collocator(ct.tokenize(ct.ldcorpus(corpus2)), word, stat='MI')
    print(f'''
    Collocates for the word `{word}`: {corpus1}''')
    ct.head(corp1_collocates, hits=20)
    print(f'''
    Collocates for the word `{word}`: {corpus2}''')
    ct.head(corp2_collocates, hits=20)
class cross_GNN(MessagePassing):
    """Mean-aggregating message passing whose messages are elementwise
    products of endpoint features (pairwise feature interactions).

    NOTE(review): ``dim`` and ``hidden_layer`` are accepted but unused —
    kept for interface compatibility with existing callers.
    """

    def __init__(self, dim, hidden_layer):
        super(cross_GNN, self).__init__(aggr='mean')

    def forward(self, x, edge_index, edge_weight=None):
        # Drop singleton dimensions before propagation.
        x = x.squeeze()
        return self.propagate(edge_index, x=x, edge_weight=edge_weight)

    def message(self, x_i, x_j, edge_weight):
        """Per-edge message: feature interaction, optionally edge-weighted."""
        pairwise_analysis = (x_i * x_j)
        # Fix: use `is not None` — `!= None` on a tensor invokes the
        # overloaded comparison operator, which is the wrong check here.
        if edge_weight is not None:
            interaction_analysis = (pairwise_analysis * edge_weight.view(-1, 1))
        else:
            interaction_analysis = pairwise_analysis
        return interaction_analysis

    def update(self, aggr_out):
        # Identity update: aggregated messages are the new node features.
        return aggr_out
class OneColorSpaceInvadersWorld(SpaceInvadersWorld):
    """Space Invaders variant where shields and invaders are all white."""
    shield_class = WhiteShield                        # monochrome shield sprite
    invader_class = WhiteLeftRightMovingInvader       # monochrome moving invader
# NOTE(review): this bare call looks like a stripped ``@test_utils.test()``
# decorator for the function below — confirm against the upstream source.
_utils.test()
def test_non_static_in():
    """Using `in` on types outside ti.static must raise a compilation error."""
    with pytest.raises(ti.TaichiCompilationError, match='"In" is only supported inside `ti.static`.'):
        # NOTE(review): `foo` presumably carries a stripped @ti.kernel
        # (or similar) decorator in the original — confirm.
        def foo(a: ti.template()) -> ti.i32:
            b = 0
            if (a in [ti.i32, ti.u32]):
                b = 1
            return b
        foo(ti.i32)
def test_var_test_case(test_case_mock):
    """A VariableReference must expose the test case it was created for."""
    reference = vr.VariableReference(test_case_mock, int)
    assert reference.test_case == test_case_mock
# NOTE(review): this bare tuple-expression looks like a stripped
# ``@pytest.fixture(scope='function')`` decorator — confirm upstream.
(scope='function')
def montecarlo_main_loop_config(config_montecarlo_1e5_verysimple):
    """Fixture: tune the very-simple 1e5-packet config for the main-loop test."""
    # Force the legacy single-threaded code path for deterministic comparison.
    montecarlo_configuration.LEGACY_MODE_ENABLED = True
    config_montecarlo_1e5_verysimple.montecarlo.last_no_of_packets = 100000.0
    config_montecarlo_1e5_verysimple.montecarlo.no_of_virtual_packets = 0
    config_montecarlo_1e5_verysimple.montecarlo.iterations = 1
    config_montecarlo_1e5_verysimple.plasma.line_interaction_type = 'macroatom'
    # Remove the helper key so the config validates cleanly downstream.
    del config_montecarlo_1e5_verysimple['config_dirname']
    return config_montecarlo_1e5_verysimple
def simPushInt32OntoStack(stackHandle, value):
    """Push a 32-bit integer onto the given stack via the C library,
    raising through _check_return on a non-success return code."""
    _check_return(lib.simPushInt32OntoStack(stackHandle, value))
def hash_sequence(seq, ksize):
    """Return the k-mer hashes of *seq* using a cached khmer hashing function.

    The hashing function is memoized in the module-level globals
    ``hashing_fn`` / ``hashing_ksize`` and rebuilt only when *ksize* changes.
    NOTE(review): not thread-safe due to the shared globals — confirm callers
    are single-threaded.
    """
    global hashing_fn, hashing_ksize
    if ((hashing_fn is None) or (hashing_ksize != ksize)):
        # Minimal Nodetable (size 1, 1 table): only used for its hash method.
        kh = khmer.Nodetable(ksize, 1, 1)
        (hashing_fn, hashing_ksize) = (kh.get_kmer_hashes, ksize)
    return hashing_fn(seq)
def test_context_manager_decorator():
    """A context manager used inside a compiled program must run its
    start/stop callbacks around the managed block.

    NOTE(review): ``mgr`` is a generator evidently meant to carry a stripped
    ``@contextmanager`` decorator, and ``prog`` (annotated with
    ``dace.float64[20]``) a stripped ``@dace.program`` — as written, the
    ``with`` statement on a bare generator would fail; confirm upstream.
    """
    class Ctx():
        def __init__(self) -> None:
            self.did_start = False
            self.should_pass = False

        def mgr(self, name: str):
            # start before the managed block, stop after it.
            self.start(name)
            (yield)
            self.stop()

        def start(self, name: str):
            if (name == 'pass'):
                self.did_start = True

        def stop(self):
            # Only counts as a pass if start() ran with the right name first.
            if self.did_start:
                self.should_pass = True
    ctx = Ctx()

    def prog(A: dace.float64[20]):
        with ctx.mgr('pass'):
            A[:] = 0
    A = np.random.rand(20)
    prog(A)
    assert ctx.should_pass
def train(args, train_dataset, model, tokenizer):
    """Standard transformers fine-tuning loop (single/multi-GPU, optional
    distributed and fp16 training).

    Returns ``(global_step, average_training_loss)``.
    """
    # TensorBoard only on the main process (rank -1 = non-distributed).
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total optimization steps: either capped by max_steps or derived from epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # No weight decay on biases and LayerNorm weights, as is conventional.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU (DataParallel) and distributed (DDP) wrapping.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Num Epochs = %d', args.num_train_epochs)
    logger.info('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info('  Total optimization steps = %d', t_total)
    global_step = 0
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    # Re-seed here so resumed runs are reproducible.
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2], 'labels': batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            # Backward pass (scaled under fp16), then gradient clipping.
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            tr_loss += loss.item()
            # Optimizer step only every gradient_accumulation_steps mini-batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic logging (and optional mid-training evaluation).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                # Periodic checkpointing (unwrap DataParallel/DDP first).
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_vocabulary(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
# NOTE(review): this bare attribute-call looks like a stripped
# ``@pytest.mark.parametrize(...)`` decorator — confirm upstream.
.parametrize('sql', ['select 1 -- foo', 'select 1 # foo'])
def test_single_line_comments(sql):
    """Both `--` and `#` single-line comments must parse as Comment.Single."""
    p = sqlparse.parse(sql)[0]
    # 5 tokens: SELECT, whitespace, 1, whitespace, comment.
    assert (len(p.tokens) == 5)
    assert (p.tokens[(- 1)].ttype == T.Comment.Single)
def construct_model():
    """Build the normalizing-flow model described by the global ``args``.

    With ``args.nf`` set, stacks planar flows; otherwise stacks coupling
    layers (each optionally preceded by a brute-force linear layer when
    ``args.glow`` is set), alternating the coupling swap per depth.
    """
    if args.nf:
        planar_layers = [layers.PlanarFlow(2) for _ in range(args.depth)]
        return layers.SequentialFlow(planar_layers)
    chain = []
    for depth_idx in range(args.depth):
        if args.glow:
            chain.append(layers.BruteForceLayer(2))
        chain.append(layers.CouplingLayer(2, swap=(depth_idx % 2 == 0)))
    return layers.SequentialFlow(chain)
class ZenodoDownloadError(ZenodoException):
    """Raised when downloading a dataset from Zenodo fails."""

    def __init__(self):
        # Fixed message; callers distinguish failures by exception type.
        super().__init__('An error occurred while downloading the dataset from Zenodo.')
def append_beams(obj, beams):
    """Append beam specifications from ``beams[0]`` onto ``obj.beams``.

    NOTE(review): only the first element of *beams* is iterated, and
    ``obj.beams.append(former, latter)`` takes two arguments — so
    ``obj.beams`` is evidently a custom container (not a list); confirm its
    append semantics before changing this.
    """
    for b in beams[0]:
        if ('-' in b):
            # "a-b" style entries are split into a two-argument append.
            (former, latter) = b.split('-')
            obj.beams.append(former, latter)
        else:
            obj.beams.append(b)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.