code stringlengths 101 5.91M |
|---|
class ResidualBlock(nn.Module):
    """Residual block: returns x + F(x), where F is conv -> IN -> ReLU -> conv -> IN."""

    def __init__(self, dim_in, dim_out):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        # Identity shortcut; assumes dim_in == dim_out so the addition broadcasts.
        return x + self.main(x)
def frame_ans(question_utter, ques_string, dialogUtterance, ans_string, wh):
    """Render a question/answer exchange as a single narrative sentence.

    For a wh-question the answer utterance is quoted verbatim; otherwise the
    reply is summarized as agreement ('ny'/'na' act tags) or disagreement.

    Fix: in the original, the act-tag branch sat after an unconditional
    ``return`` and was unreachable; it is now the non-wh path.
    """
    article = question_utter.speaker + ' asked '
    article += ques_string
    if wh:
        # Wh-question: quote the reply verbatim.
        article += ' and ' + dialogUtterance.speaker + ' replied '
        article += ans_string
    elif dialogUtterance.act_tag in ('ny', 'na'):
        # Yes-type answers ('ny' = yes, 'na' = affirmative answer).
        article += ' ' + dialogUtterance.speaker + ' agreed .'
    else:
        article += ' ' + dialogUtterance.speaker + ' disagreed .'
    print('answer->')
    print(article)
    return article
def create_objective(sim_space: optplan.SimulationSpace) -> Tuple[(optplan.Function, List[optplan.Monitor])]:
    """Build the optimization objective and monitors for a 1300/1550 nm WDM splitter.

    One waveguide-mode source on the left; two mode overlaps on the right
    (1550 nm -> lower arm, 1300 nm -> upper arm). The objective sums
    (1 - power)**2 over both wavelengths, driving each overlap power to 1.

    Returns:
        (objective function, list of field/power monitors).
    """
    wg_source = optplan.WaveguideModeSource(center=[(- 1770), 0, 0], extents=[GRID_SPACING, 1500, 600], normal=[1, 0, 0], mode_num=0, power=1.0)
    overlap_1550 = optplan.WaveguideModeOverlap(center=[1730, (- 500), 0], extents=[GRID_SPACING, 1500, 600], mode_num=0, normal=[1, 0, 0], power=1.0)
    overlap_1300 = optplan.WaveguideModeOverlap(center=[1730, 500, 0], extents=[GRID_SPACING, 1500, 600], mode_num=0, normal=[1, 0, 0], power=1.0)
    power_objs = []
    monitor_list = []
    for (wlen, overlap) in zip([1300, 1550], [overlap_1300, overlap_1550]):
        epsilon = optplan.Epsilon(simulation_space=sim_space, wavelength=wlen)
        # Direct solver for 2D runs; iterative Maxwell solver otherwise.
        sim = optplan.FdfdSimulation(source=wg_source, solver=('local_direct' if SIM_2D else 'maxwell_cg'), wavelength=wlen, simulation_space=sim_space, epsilon=epsilon)
        monitor_list.append(optplan.FieldMonitor(name='field{}'.format(wlen), function=sim, normal=[0, 0, 1], center=[0, 0, 0]))
        # Epsilon is wavelength-independent enough to monitor only once.
        if (wlen == 1300):
            monitor_list.append(optplan.FieldMonitor(name='epsilon', function=epsilon, normal=[0, 0, 1], center=[0, 0, 0]))
        overlap = optplan.Overlap(simulation=sim, overlap=overlap)
        # Transmitted power = |mode overlap|^2.
        power = (optplan.abs(overlap) ** 2)
        power_objs.append(power)
        monitor_list.append(optplan.SimpleMonitor(name='power{}'.format(wlen), function=power))
    # Minimize squared shortfall from unit transmission at each wavelength.
    obj = 0
    for power in power_objs:
        obj += ((1 - power) ** 2)
    monitor_list.append(optplan.SimpleMonitor(name='objective', function=obj))
    return (obj, monitor_list)
def test_estimate_competence_ratio_batch():
    """DESKNN competence/diversity estimation with metric='ratio' on a batch.

    Three stub classifiers return fixed predictions over the 7 training
    points, so the expected competences (fraction of the 7 neighbors each
    classifier predicts correctly: 2/7, 4/7, 5/7) and the diversity values
    are known constants.
    """
    n_samples = 10
    x = np.array([0, 1, 2, 3, 4, 5, 6]).reshape((- 1), 1)
    y = np.array([0, 0, 0, 0, 1, 1, 1])
    # Each stub always predicts the given fixed label vector.
    clf1 = create_base_classifier(np.array([1, 0, 1, 0, 0, 0, 0]))
    clf2 = create_base_classifier(np.array([1, 0, 0, 0, 1, 0, 0]))
    clf3 = create_base_classifier(np.array([0, 0, 1, 0, 1, 1, 0]))
    pool_classifiers = [clf1, clf2, clf3]
    # pct_accuracy=pct_diversity=1 keeps every classifier through both stages.
    target = DESKNN(pool_classifiers, k=7, pct_accuracy=1, pct_diversity=1, metric='ratio')
    target.fit(x, y)
    # Every query row sees the same 7 neighbors.
    neighbors = np.tile([0, 1, 2, 3, 4, 5, 6], (n_samples, 1))
    (competences, diversity) = target.estimate_competence(neighbors)
    assert np.allclose(competences, [(2.0 / 7), (4.0 / 7), (5.0 / 7)])
    assert np.allclose(diversity, [2.166, 3.666, 4.5], atol=0.01)
class Pool(multiprocessing.pool.Pool):
    """multiprocessing.Pool variant that uses SimpleQueue-based pipes and a
    custom worker entry point (``clean_worker``) when (re)spawning workers."""

    def _setup_queues(self):
        # Replace the default task/result queues with SimpleQueue, and expose
        # the raw pipe endpoints as the fast-path put/get the pool internals use.
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def _repopulate_pool(self):
        # Bring the live worker count back up to self._processes.
        for i in range((self._processes - len(self._pool))):
            args = (self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild)
            # '_wrap_exception' only exists on newer Python versions; forward it
            # when present so clean_worker's signature matches the stdlib worker.
            if hasattr(self, '_wrap_exception'):
                args += (self._wrap_exception,)
            w = self.Process(target=clean_worker, args=args)
            self._pool.append(w)
            # Mirror the stdlib naming so logs look like a normal pool worker.
            w.name = w.name.replace('Process', 'PoolWorker')
            # Daemonize so stray workers die with the parent process.
            w.daemon = True
            w.start()
            util.debug('added worker')
def RandomNewmanWattsStrogatz(n, k, p, seed=None):
    """Return a random Newman-Watts-Strogatz small-world graph as a Sage Graph.

    Wraps ``networkx.newman_watts_strogatz_graph``: ``n`` nodes on a ring,
    each joined to its ``k`` nearest neighbors, with shortcut edges added
    with probability ``p``. When ``seed`` is None, one is derived from the
    Sage random state so runs are reproducible under Sage's seeding.
    """
    if (seed is None):
        seed = int((current_randstate().long_seed() % sys.maxsize))
    import networkx
    return Graph(networkx.newman_watts_strogatz_graph(n, k, p, seed=seed))
def _ignore_torch_cuda_oom():
try:
(yield)
except RuntimeError as e:
if ('CUDA out of memory. ' in str(e)):
pass
else:
raise |
class FlaxBertModelTester(unittest.TestCase):
    """Builds a tiny random BertConfig plus matching input tensors for Flax
    BERT tests. The hyper-parameter defaults are deliberately small so the
    produced models are fast to run in unit tests."""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        # Store every knob verbatim; they are consumed by the prepare_* methods.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with
        random contents matching the configured shapes; optional tensors are
        None when disabled."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return (config, input_ids, token_type_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict shape used by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, attention_mask) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return (config, inputs_dict)

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs but with is_decoder=True, plus
        random encoder hidden states and a binary encoder attention mask for
        cross-attention tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, attention_mask) = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask)
def register_Ns3GammaRandomVariable_methods(root_module, cls):
    """Register the ns3::GammaRandomVariable C++ API on the binding class."""
    add = cls.add_method
    add('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    add('GetAlpha', 'double', [], is_const=True)
    add('GetBeta', 'double', [], is_const=True)
    # Overloads taking explicit distribution parameters.
    add('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')])
    add('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    # Virtual overloads using the configured attributes.
    add('GetValue', 'double', [], is_virtual=True)
    add('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def _parent_name(target):
r = target.rsplit('.', 1)
if (len(r) == 1):
return ('', r[0])
else:
return (r[0], r[1]) |
def get_running_cuda_version(run_lambda):
    """Return the CUDA toolkit version by running ``nvcc --version`` via
    ``run_lambda`` and extracting the 'release ... V<version>' suffix."""
    return run_and_parse_first_match(run_lambda, 'nvcc --version', 'release .+ V(.*)')
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone that returns intermediate feature maps.

    ``forward`` emits the outputs of the feature layers listed in
    ``out_indices`` (with channel counts given by ``channels``), for use as
    a multi-scale backbone in dense-prediction models.
    """

    def __init__(self, variant: str = None):
        # NOTE(review): ``variant`` is currently unused — confirm intent.
        super().__init__()
        # Indices of feature layers whose outputs are returned by forward().
        self.out_indices = [3, 6, 13, 17]
        self.channels = [24, 32, 96, 320]
        input_channel = 32
        # Per stage: [expansion t, output channels c, repeats n, first stride s].
        inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        # Stem: 3x3 stride-2 conv from RGB to 32 channels.
        self.features = nn.ModuleList([ConvModule(3, input_channel, 3, 2, 1)])
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = c
            for i in range(n):
                # Only the first block of a stage applies the stage stride.
                stride = (s if (i == 0) else 1)
                self.features.append(InvertedResidual(input_channel, output_channel, stride, t))
                input_channel = output_channel

    def forward(self, x: Tensor) -> list:
        # Returns a list of Tensors (one per index in out_indices), not a
        # single Tensor — annotation corrected accordingly.
        outs = []
        for (i, m) in enumerate(self.features):
            x = m(x)
            if (i in self.out_indices):
                outs.append(x)
        return outs
def gamma_list_to_cyclotomic(galist):
    """Convert a signed gamma list into numerator/denominator cyclotomic data.

    Each entry n contributes sgn(n) to the multiplicity of every divisor of
    |n|. Divisors with positive net multiplicity go to the first (sorted)
    list, repeated by multiplicity; negative ones go to the second.
    """
    multiplicity = defaultdict(int)
    for entry in galist:
        sign = sgn(entry)
        for divisor in divisors(abs(entry)):
            multiplicity[divisor] += sign
    numerator = sorted(d for d in multiplicity for _ in range(multiplicity[d]))
    denominator = sorted(d for d in multiplicity for _ in range((- multiplicity[d])))
    return (numerator, denominator)
def create_differentiability_info(signature, non_differentiable_arg_names, output_differentiability, autograd_fn):
    """Bundle autograd-codegen metadata into a plain dict keyed by field name."""
    info = dict(
        signature=signature,
        non_differentiable_arg_names=non_differentiable_arg_names,
        output_differentiability=output_differentiability,
        autograd_fn=autograd_fn,
    )
    return info
def resblock(x_init, channels, use_bias=True, scope='resblock'):
    """Residual block: two reflect-padded 3x3 conv + instance-norm layers with
    a ReLU between them, added to the identity input (no activation after the
    second norm)."""
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            x = conv(x_init, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias)
            x = instance_norm(x)
            x = relu(x)
        with tf.variable_scope('res2'):
            x = conv(x, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias)
            x = instance_norm(x)
        # Identity skip; assumes x_init already has ``channels`` channels.
        return (x + x_init)
def extract_tags(title: str) -> List[str]:
    """Extract lowercase bracketed tags from a title prefix like '[A] [B] rest'.

    Everything before the final '] ' separator is treated as tag material;
    a title with no tags returns an empty list.

    Raises:
        ValueError: if a tag segment does not start with '['.
    """
    tags: List[str] = []
    for segment in title.split('] ')[:(- 1)]:
        # startswith also rejects an empty segment cleanly (the original
        # segment[0] indexing would raise IndexError on '[] ...'-style input).
        if not segment.startswith('['):
            # Fixed: original message had a stray ']' appended after the tag.
            raise ValueError(f'No starting [ for tag: {segment}')
        tags.append(segment[1:].lower())
    return tags
def test_poiless_model_empty_string(backend):
    """A model built with poi_name='' (no POI) still supports an unconstrained
    MLE fit, but POI-dependent inference (fixed-POI fit, hypotest) must raise
    UnspecifiedPOI."""
    spec = {'channels': [{'name': 'channel', 'samples': [{'name': 'goodsample', 'data': [10.0], 'modifiers': [{'type': 'normsys', 'name': 'shape', 'data': {'hi': 0.5, 'lo': 1.5}}]}]}]}
    model = pyhf.Model(spec, poi_name='')
    data = ([12] + model.config.auxdata)
    # Plain MLE fit needs no POI and must succeed.
    pyhf.infer.mle.fit(data, model)
    with pytest.raises(pyhf.exceptions.UnspecifiedPOI):
        pyhf.infer.mle.fixed_poi_fit(1.0, data, model)
    with pytest.raises(pyhf.exceptions.UnspecifiedPOI):
        pyhf.infer.hypotest(1.0, data, model)
def main(backbone: str, checkpoint: Path, dataset: str, split: str='test', device: str='cuda', batch_size: int=128, num_workers: int=0, output_parquet: Optional[Path]=None) -> None:
    """Compute embeddings for one dataset split with a pretrained backbone and
    save them as a gzip-compressed parquet file.

    When ``output_parquet`` is None, the output path defaults to
    ``data/features/<dataset>/<split>/<checkpoint stem>.parquet.gzip``.
    """
    model = build_backbone(backbone, checkpoint, device)
    logger.info(f'Loaded backbone {backbone} from {checkpoint}')
    # The transform must match the backbone's expected preprocessing.
    dataset_transform = get_dataset_transform(backbone)
    initialized_dataset = get_dataset(dataset, split, dataset_transform)
    # shuffle=False keeps row order aligned with the dataset ordering.
    dataloader = DataLoader(initialized_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
    logger.info(f'Loaded dataset {dataset} ({split} split)')
    embeddings_df = predict_embeddings(dataloader, model, device=device)
    # Parquet cannot hold tensors; convert embedding columns in place.
    cast_embeddings_to_numpy(embeddings_df)
    if (output_parquet is None):
        output_parquet = (((Path('data/features') / dataset) / split) / checkpoint.with_suffix('.parquet.gzip').name)
    output_parquet.parent.mkdir(parents=True, exist_ok=True)
    embeddings_df.to_parquet(output_parquet, index=False, compression='gzip')
    logger.info(f'Saved embeddings to {output_parquet}')
def test_pickle():
    """A bound method survives a pickle round-trip and still returns 42."""
    obj = DemoClass()
    s = pickle_dumps(obj.method)
    inst = pickle_loads(s)
    assert_equal(inst(), 42)
def reduce_paramsets_requirements(paramsets_requirements, paramsets_user_configs):
    """Merge per-modifier paramset requirements into a single spec per paramset.

    For every paramset name, each tracked key must have one consistent value
    across all of its requirements (InvalidNameReuse otherwise). A
    user-supplied config value overrides the agreed default; list-valued
    overrides are length-checked against the default (InvalidModel on
    mismatch), and configuring an attribute the paramset never declared also
    raises InvalidModel.
    """
    reduced_paramsets_requirements = {}
    paramset_keys = ['paramset_type', 'n_parameters', 'is_scalar', 'inits', 'bounds', 'auxdata', 'factors', 'sigmas', 'fixed']
    for paramset_name in list(paramsets_requirements):
        paramset_requirements = paramsets_requirements[paramset_name]
        paramset_user_configs = paramsets_user_configs.get(paramset_name, {})
        combined_paramset = {}
        for k in paramset_keys:
            # Gather this key from every requirement; the sentinel 'undefined'
            # marks requirements that did not set it.
            for paramset_requirement in paramset_requirements:
                v = paramset_requirement.get(k, 'undefined')
                combined_paramset.setdefault(k, set()).add(v)
            if (len(combined_paramset[k]) != 1):
                raise exceptions.InvalidNameReuse(f"Multiple values for '{k}' ({list(combined_paramset[k])}) were found for {paramset_name}. Use unique modifier names when constructing the pdf.")
            default_v = combined_paramset[k].pop()
            # User configuration wins over the agreed default.
            v = paramset_user_configs.get(k, default_v)
            if (v == 'undefined'):
                continue
            if isinstance(v, tuple):
                v = list(v)
            elif (isinstance(v, list) and default_v and (len(v) != len(default_v))):
                raise exceptions.InvalidModel(f'Incorrect number of values ({len(v)}) for {k} were configured by you, expected {len(default_v)}.')
            elif (v and (default_v == 'undefined')):
                # The user configured an attribute this paramset does not use.
                raise exceptions.InvalidModel(f'{paramset_name} does not use the {k} attribute.')
            combined_paramset[k] = v
        combined_paramset['name'] = paramset_name
        reduced_paramsets_requirements[paramset_name] = combined_paramset
    return reduced_paramsets_requirements
def train(solver, snapshot, gpus, timing=False):
    """Run multi-GPU Caffe training: one ``solve`` process per GPU, all
    synchronized through a shared NCCL unique id; blocks until every worker
    process has exited."""
    uid = caffe.NCCL.new_uid()
    caffe.init_log()
    caffe.log(('Using devices %s' % str(gpus)))
    procs = []
    for rank in range(len(gpus)):
        p = Process(target=solve, args=(solver, snapshot, gpus, timing, uid, rank))
        # Daemonize so workers die with the parent on abnormal exit.
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
def test_results():
    """The predictor run should have produced exactly one results CSV."""
    matches = glob.glob('test_predictor_outputs/X_prediction_results.csv')
    assert len(matches) == 1
def image_augmentation_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, shape=None, pad=(0, 0), min_scale=1.0, max_scale=1.0, angle=0.0, aspect_ratio=1.0, distortion=0.0, flip_lr=False, flip_ud=False, brightness=0.0, brightness_each=False, contrast=1.0, contrast_center=0.0, contrast_each=False, noise=0.0, seed=(- 1)):
    """Backward function for the image-augmentation op.

    Not supported: always raises NotImplementedError. The leading unpacking
    of the incoming gradient and input is kept for signature/shape parity
    with other backward implementations.
    """
    dy = grad_inputs[0]
    x0 = inputs[0]
    raise NotImplementedError('image_augmentation_backward is not implemented.')
class RGBArrayAsObservationWrapper(dm_env.Environment):
    """Use env.render(rgb_array) frames as the observation rather than the
    observation the wrapped environment provides.

    Observations are dicts with 'pixels' (HxWx3 uint8 frame), 'features'
    (the wrapped env's raw observation as float32) and 'goal_achieved'.
    On reset, a new task is sampled from ``ml1.train_tasks``; episodes are
    truncated at ``max_path_length`` steps.
    """

    def __init__(self, env, ml1, width=84, height=84, max_path_length=125, camera_name='corner'):
        self._env = env
        self.ml1 = ml1
        self._width = width
        self._height = height
        self.camera_name = camera_name
        self.max_path_length = max_path_length
        # Probe the env once to learn observation shapes/dtypes for the specs.
        dummy_feat = self._env.reset()
        dummy_obs = self.get_frame()
        self.observation_space = spaces.Box(low=0, high=255, shape=dummy_obs.shape, dtype=dummy_obs.dtype)
        self.action_space = self._env.action_space
        wrapped_action_spec = self.action_space
        # Gym spaces lack minimum/maximum; default to the unit box.
        if (not hasattr(wrapped_action_spec, 'minimum')):
            wrapped_action_spec.minimum = (- np.ones(wrapped_action_spec.shape))
        if (not hasattr(wrapped_action_spec, 'maximum')):
            wrapped_action_spec.maximum = np.ones(wrapped_action_spec.shape)
        self._action_spec = specs.BoundedArray(wrapped_action_spec.shape, np.float32, wrapped_action_spec.minimum, wrapped_action_spec.maximum, 'action')
        self._obs_spec = {}
        self._obs_spec['pixels'] = specs.BoundedArray(shape=self.observation_space.shape, dtype=np.uint8, minimum=0, maximum=255, name='observation')
        self._obs_spec['features'] = specs.Array(shape=dummy_feat.shape, dtype=np.float32, name='observation')

    def reset(self, **kwargs):
        """Sample a fresh train task, reset the env, return the obs dict."""
        task = random.choice(self.ml1.train_tasks)
        self._env.set_task(task)
        self.episode_step = 0
        obs = {}
        obs['features'] = self._env.reset(**kwargs).astype(np.float32)
        obs['pixels'] = self.get_frame()
        obs['goal_achieved'] = False
        return obs

    def step(self, action):
        """Step the wrapped env; truncate at max_path_length steps."""
        (observation, reward, done, info) = self._env.step(action)
        obs = {}
        obs['features'] = observation.astype(np.float32)
        obs['pixels'] = self.get_frame()
        obs['goal_achieved'] = info['success']
        self.episode_step += 1
        if (self.episode_step == self.max_path_length):
            done = True
        return (obs, reward, done, info)

    def observation_spec(self):
        return self._obs_spec

    def action_spec(self):
        return self._action_spec

    def render(self, mode='rgb_array', width=256, height=256):
        """Offscreen RGB render at the requested size; any other mode is
        delegated to the wrapped env's renderer."""
        if (mode == 'rgb_array'):
            frame = self._env.render(offscreen=True, camera_name=self.camera_name)
            frame = cv2.resize(frame, (width, height))
            return frame
        else:
            self._env.render()

    def get_frame(self):
        """Offscreen render resized to the configured observation size."""
        frame = self._env.render(offscreen=True, camera_name=self.camera_name)
        frame = cv2.resize(frame, (self._width, self._height))
        return frame

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped environment.
        return getattr(self._env, name)
def pythonify(tensor):
    """Convert a tensor's value to a plain Python object (list, str, or int).

    Raises ValueError for any other numpy()-produced type.
    """
    value = tensor.numpy()
    if isinstance(value, np.ndarray):
        return value.tolist()
    if isinstance(value, bytes):
        return value.decode()
    if isinstance(value, (int, np.int32, np.int64)):
        return int(value)
    raise ValueError(value)
class FiniteFields(CategoryWithAxiom):
    """The category of finite fields."""

    def extra_super_categories(self):
        """Finite fields are in particular finite enumerated sets."""
        return [EnumeratedSets().Finite()]

    def __contains__(self, x):
        """An object belongs to this category iff it is a field and finite."""
        # Imported locally to avoid an import cycle with sage.categories.fields.
        from sage.categories.fields import Fields
        return ((x in Fields()) and x.is_finite())

    def _call_(self, x):
        """There is no canonical finite field associated to a general object,
        so the category call always raises TypeError."""
        raise TypeError(('unable to canonically associate a finite field to %s' % x))

    class ParentMethods():
        # No extra parent methods beyond those inherited from super categories.
        pass

    class ElementMethods():
        # No extra element methods beyond those inherited from super categories.
        pass
def make_sdfg(implementation, dtype, storage=dace.StorageType.Default):
    """Build an SDFG with a single Cholesky library node: xout = chol(xin).

    The graph reads the n-by-n array 'xin', runs a lower-triangular Cholesky
    with the requested library-node implementation, and writes 'xout'.

    NOTE(review): ``storage`` is currently unused — confirm whether the
    arrays were meant to be created with it.
    """
    n = dace.symbol('n', dace.int64)
    sdfg = dace.SDFG('linalg_cholesky_{}_{}'.format(implementation, dtype))
    state = sdfg.add_state('dataflow')
    inp = sdfg.add_array('xin', [n, n], dtype)
    out = sdfg.add_array('xout', [n, n], dtype)
    xin = state.add_read('xin')
    xout = state.add_write('xout')
    # Lower-triangular factorization; backend picked by 'implementation'.
    chlsky_node = Cholesky('cholesky', lower=True)
    chlsky_node.implementation = implementation
    state.add_memlet_path(xin, chlsky_node, dst_conn='_a', memlet=Memlet.from_array(*inp))
    state.add_memlet_path(chlsky_node, xout, src_conn='_b', memlet=Memlet.from_array(*out))
    return sdfg
def _jump_lengths_individual(traj):
    """Return haversine distances between consecutive points of a single
    trajectory, ordered by timestamp.

    NOTE(review): a single-point trajectory returns a plain list [] while the
    general case returns an ndarray — confirm callers accept both.
    """
    if (len(traj) == 1):
        return []
    # Sort by time so consecutive rows are consecutive visits.
    lats_lngs = traj.sort_values(by=constants.DATETIME)[[constants.LATITUDE, constants.LONGITUDE]].values
    lengths = np.array([getDistanceByHaversine(lats_lngs[i], lats_lngs[(i - 1)]) for i in range(1, len(lats_lngs))])
    return lengths
class _AvgPoolNd(Module):
def extra_repr(self):
return 'kernel_size={}, stride={}, padding={}'.format(self.kernel_size, self.stride, self.padding) |
def ssim_exact(img1, img2, sd=1.5, C1=(0.01 ** 2), C2=(0.03 ** 2)):
    """Gaussian-weighted SSIM between two images.

    Returns (mean of the SSIM map, mean contrast-structure term); the latter
    is the component used by multi-scale SSIM.
    """
    def blur(arr):
        return ndimage.gaussian_filter(arr, sd)

    mu1 = blur(img1)
    mu2 = blur(img2)
    mu1_sq = mu1 * mu1
    mu2_sq = mu2 * mu2
    mu1_mu2 = mu1 * mu2
    # Local (co)variances via E[xy] - E[x]E[y].
    sigma1_sq = blur(img1 * img1) - mu1_sq
    sigma2_sq = blur(img2 * img2) - mu2_sq
    sigma12 = blur(img1 * img2) - mu1_mu2
    numerator = (2 * mu1_mu2 + C1) * (2 * sigma12 + C2)
    denominator = (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
    ssim_map = numerator / denominator
    cs = np.mean((2.0 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2))
    return (np.mean(ssim_map), cs)
class QueryResponseDataset(Dataset):
    """Tokenize (query, response) pairs, drop pairs exceeding the length
    limits, and left-pad the rest into fixed-size tensors for RL training."""

    def __init__(self, tokenizer: transformers.PreTrainedTokenizer, queries: Sequence[str], responses: Sequence[str], query_len: int, response_len: int):
        super(QueryResponseDataset, self).__init__()

        def tokenize_without_truncation(strings):
            # One 1-D id tensor per string; no truncation so lengths are exact.
            return [tokenizer(string, return_tensors='pt', truncation=False).input_ids[0] for string in strings]
        # Tokenize the concatenated sequence, then recover response tokens by
        # slicing off the query prefix — keeps tokenization consistent at the
        # query/response boundary.
        sequences = [(query + response) for (query, response) in utils.zip_(queries, responses)]
        queries = tokenize_without_truncation(queries)
        sequences = tokenize_without_truncation(sequences)
        responses = [sequence[len(query):] for (sequence, query) in utils.zip_(sequences, queries)]
        # Keep only pairs that fit the configured budgets.
        filtered_pairs = [(query, response) for (query, response) in utils.zip_(queries, responses) if ((len(query) <= query_len) and (len(response) <= response_len))]
        filtered_queries = [query for (query, _) in filtered_pairs]
        filtered_responses = [response for (_, response) in filtered_pairs]
        logger.warning(f"Filtered out {(len(queries) - len(filtered_queries))} instances out of {len(queries)} that exceed length limit... These examples are not used for training. However they won't be ignored if this is eval set that is used in `RLTrainer.evaluate`.")

        def left_pad_and_stack(list_of_tensors: Sequence[torch.Tensor], target_len: int):
            # Left padding keeps the real tokens right-aligned for generation.
            return torch.stack([torch_ops.left_pad(tensor, target_size=(target_len,), value=tokenizer.pad_token_id) for tensor in list_of_tensors])
        queries = left_pad_and_stack(filtered_queries, query_len)
        responses = left_pad_and_stack(filtered_responses, response_len)
        self.queries = queries
        self.responses = responses
        # 1 for real tokens, 0 for pad positions.
        self.query_attn_masks = queries.ne(tokenizer.pad_token_id).long()

    def __getitem__(self, i):
        """Return one example as a dict of (query, response, attention mask)."""
        return dict(queries=self.queries[i], responses=self.responses[i], query_attn_masks=self.query_attn_masks[i])

    def __len__(self):
        return len(self.queries)
class XLMRobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) XLM-RoBERTa tokenizer.

    Special-token layout follows RoBERTa: single sequence is
    ``<s> A </s>``; a pair is ``<s> A </s></s> B </s>``. Token type ids are
    always all zeros since XLM-RoBERTa does not use them.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = XLMRobertaTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        # The mask token absorbs the preceding space so '<mask>' matches as
        # if it were a normal word.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        # Without the sentencepiece file we cannot reconstruct a slow tokenizer.
        self.can_save_slow_tokenizer = (False if (not self.vocab_file) else True)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add <s>/</s> special tokens around one or two sequences."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        # Pair layout uses a double separator between the two sequences.
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return all-zero token type ids (XLM-RoBERTa ignores segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``.

        NOTE(review): returns None (bare return) when save_directory is not a
        directory, versus the annotated Tuple[str] — confirm callers handle it.
        """
        if (not self.can_save_slow_tokenizer):
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Skip the copy when saving onto the very same file.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
class Op():
    """A hashable symbolic operation: (kind, inputs, optional attribs).

    Equality and hashing are value-based over kind, the input tuple, and the
    frozen attrib items; ``output`` does not participate.
    """

    def __init__(self, kind, inputs, attribs=None):
        self.kind = kind
        self.inputs = inputs
        self.output = None
        self.attribs = attribs

    def __repr__(self):
        suffix = (' %r' % self.attribs) if self.attribs else ''
        return ('<Dim.Op %r %s%s>' % (self.kind, self.inputs, suffix))

    def _value(self):
        # Canonical hashable identity tuple.
        frozen_attribs = frozenset(self.attribs.items()) if self.attribs else None
        return (self.kind, tuple(self.inputs), frozen_attribs)

    def __hash__(self):
        # Guard against cycles through inputs during recursive hashing.
        with util.guard_infinite_recursion(Op.__hash__, self):
            return hash(self._value())

    def __eq__(self, other):
        if not isinstance(other, Op):
            return False
        return self._value() == other._value()

    def __ne__(self, other):
        return not self.__eq__(other)
def parse_multipart_headers(iterable):
    """Parse MIME header lines from ``iterable`` into a ``Headers`` object.

    Parsing stops at the first empty line. Lines starting with space/tab are
    folded into the previous header's value (RFC 2822 continuation). Raises
    ValueError when a line has no proper terminator.
    """
    result = []
    for line in iterable:
        line = to_native(line)
        (line, line_terminated) = _line_parse(line)
        if (not line_terminated):
            raise ValueError('unexpected end of line in multipart header')
        if (not line):
            # Blank line ends the header block.
            break
        elif ((line[0] in ' \t') and result):
            # Continuation: append to the previous header's value.
            (key, value) = result[(- 1)]
            result[(- 1)] = (key, ((value + '\n ') + line[1:]))
        else:
            parts = line.split(':', 1)
            # Lines without a colon are silently ignored.
            if (len(parts) == 2):
                result.append((parts[0].strip(), parts[1].strip()))
    return Headers(result)
def write_sample_to_java_file(sample, java_func_dir):
    """Materialize one dataset sample as a compilable Java file plus metadata.

    The sample's URL tail is expected to look like
    ``<Class>.java#L<start>-L<end>``; a per-sample directory named
    ``<project>_<Class>_<start>_<end>`` is created under ``java_func_dir``,
    the function text is wrapped in a dummy class and written there together
    with the sample JSON.

    Returns:
        (path of the wrapped .java file, path of the sample directory).
    """
    # URL tail: "<Class>.java#L<start>-L<end>".
    couple = sample['url'].split('/')[(- 1)].split('#')
    class_name = couple[0].split('.java')[0]
    start = couple[1].split('-')[0].replace('L', '')
    end = couple[1].split('-')[1].replace('L', '')
    # The repo field name differs between dataset versions ('repo' vs 'nwo').
    if ('repo' in sample.keys()):
        project = sample['repo'].replace('/', '-')
    else:
        project = sample['nwo'].replace('/', '-')
    file_name = os.path.join(java_func_dir, ((((((project + '_') + class_name) + '_') + str(start)) + '_') + str(end)))
    if (not os.path.exists(file_name)):
        os.makedirs(file_name)
    # Likewise the code field ('code' vs 'function').
    if ('code' in sample.keys()):
        function_text = sample['code']
    else:
        function_text = sample['function']
    # Wrap the bare function in a dummy class so it parses as a compilation unit.
    function_text = wrap_function_dummy_class(function_text, class_name)
    wrapped_file_path = write_wrapped_function(function_text, class_name, file_name)
    write_json(sample, os.path.join(file_name, (class_name + '.json')))
    return (wrapped_file_path, file_name)
class LocalTFRunner(LocalRunner):
    """LocalRunner specialization that manages a TensorFlow v1 session:
    enters/exits it as a context manager, wraps sampler workers in TF
    sessions, and initializes uninitialized variables before setup."""

    def __init__(self, snapshot_config, sess=None, max_cpus=1):
        super().__init__(snapshot_config=snapshot_config, max_cpus=max_cpus)
        # Use the caller's session, or create a fresh one.
        self.sess = (sess or tf.compat.v1.Session())
        self.sess_entered = False

    def __enter__(self):
        # Only enter the session if it is not already the default, so nesting
        # does not double-enter the same session.
        if (tf.compat.v1.get_default_session() is not self.sess):
            self.sess.__enter__()
            self.sess_entered = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Mirror __enter__: only exit a session this runner actually entered.
        if ((tf.compat.v1.get_default_session() is self.sess) and self.sess_entered):
            self.sess.__exit__(exc_type, exc_val, exc_tb)
            self.sess_entered = False

    def make_sampler(self, sampler_cls, *, seed=None, n_workers=psutil.cpu_count(logical=False), max_path_length=None, worker_class=DefaultWorker, sampler_args=None, worker_args=None):
        """Delegate to LocalRunner.make_sampler, wrapping the worker class so
        each sampler worker runs inside a TF session."""
        return super().make_sampler(sampler_cls, seed=seed, n_workers=n_workers, max_path_length=max_path_length, worker_class=TFWorkerClassWrapper(worker_class), sampler_args=sampler_args, worker_args=worker_args)

    def setup(self, algo, env, sampler_cls=None, sampler_args=None, n_workers=psutil.cpu_count(logical=False), worker_class=DefaultWorker, worker_args=None):
        """Initialize TF variables before the standard runner setup."""
        self.initialize_tf_vars()
        logger.log(self.sess.graph)
        super().setup(algo, env, sampler_cls, sampler_args, n_workers, worker_class, worker_args)

    def _start_worker(self):
        self._sampler.start_worker()
        if self._plot:
            # Imported lazily: the plotter pulls in GUI dependencies.
            from garage.tf.plotter import Plotter
            self._plotter = Plotter(self.get_env_copy(), self._algo.policy, sess=tf.compat.v1.get_default_session())
            self._plotter.start()

    def initialize_tf_vars(self):
        """Run initializers only for globals TF reports as uninitialized,
        leaving already-initialized (e.g. checkpoint-restored) variables alone."""
        with tf.name_scope('initialize_tf_vars'):
            uninited_set = [e.decode() for e in self.sess.run(tf.compat.v1.report_uninitialized_variables())]
            # Strip the ':0' output suffix before matching reported names.
            self.sess.run(tf.compat.v1.variables_initializer([v for v in tf.compat.v1.global_variables() if (v.name.split(':')[0] in uninited_set)]))
_LAYERS.register_module(name='PConv')
class PartialConv2d(nn.Conv2d):
    """Partial convolution (Liu et al., 2018): convolve a masked input and
    re-normalize the output by the valid-pixel ratio of each window, while
    also producing an updated (eroded) validity mask.

    Args:
        multi_channel: if True the mask has one channel per input channel;
            otherwise a single shared mask channel is used.
        eps: small constant avoiding division by zero in the mask ratio.
    """

    def __init__(self, *args, multi_channel=False, eps=1e-08, **kwargs):
        super().__init__(*args, **kwargs)
        self.multi_channel = multi_channel
        self.eps = eps
        if self.multi_channel:
            (out_channels, in_channels) = (self.out_channels, self.in_channels)
        else:
            (out_channels, in_channels) = (1, 1)
        # All-ones kernel used to count valid (unmasked) pixels per window.
        self.register_buffer('weight_mask_updater', torch.ones(out_channels, in_channels, self.kernel_size[0], self.kernel_size[1]))
        # Number of elements a fully-valid window would contribute.
        self.mask_kernel_numel = np.prod(self.weight_mask_updater.shape[1:4])
        self.mask_kernel_numel = self.mask_kernel_numel.item()

    def forward(self, input, mask=None, return_mask=True):
        """Convolve ``input``; with ``mask`` (1=valid, 0=hole), rescale by the
        per-window valid ratio and optionally return the updated mask."""
        assert (input.dim() == 4)
        if (mask is not None):
            assert (mask.dim() == 4)
            if self.multi_channel:
                assert (mask.shape[1] == input.shape[1])
            else:
                assert (mask.shape[1] == 1)
        if (mask is not None):
            with torch.no_grad():
                # Count valid pixels covered by each sliding window.
                updated_mask = F.conv2d(mask, self.weight_mask_updater, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation)
                mask_ratio = (self.mask_kernel_numel / (updated_mask + self.eps))
                # Any window touching a valid pixel stays valid in the new mask.
                updated_mask = torch.clamp(updated_mask, 0, 1)
                mask_ratio = (mask_ratio * updated_mask)
        if (mask is not None):
            # Zero out holes before the actual convolution.
            input = (input * mask)
        raw_out = super().forward(input)
        if (mask is not None):
            if (self.bias is None):
                output = (raw_out * mask_ratio)
            else:
                # Re-normalize only the weight contribution, not the bias.
                bias_view = self.bias.view(1, self.out_channels, 1, 1)
                output = (((raw_out - bias_view) * mask_ratio) + bias_view)
                output = (output * updated_mask)
        else:
            output = raw_out
        if (return_mask and (mask is not None)):
            return (output, updated_mask)
        return output
class ListCompose(object):
    """Apply a sequence of (coord, feat, label) transforms in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, coord, feat, label):
        # Each transform consumes and returns the full triple.
        for transform in self.transforms:
            coord, feat, label = transform(coord, feat, label)
        return (coord, feat, label)
def dist_location(dist):
    """Return the effective install location of a distribution.

    An egg-link (editable/develop install) takes precedence over the
    recorded ``dist.location``.
    """
    egg_link = egg_link_path(dist)
    if egg_link:
        return egg_link
    return dist.location
def make(domain, task, task_kwargs=None, environment_kwargs=None, visualize_reward=False):
    """Construct an environment by dispatching to the per-domain factory.

    Raises NotImplementedError for unknown domains.
    """
    factories = {
        'cheetah': cheetah.make,
        'quadruped': quadruped.make,
        'humanoid': humanoid.make,
    }
    if domain not in factories:
        raise NotImplementedError
    return factories[domain](task, task_kwargs=task_kwargs, environment_kwargs=environment_kwargs, visualize_reward=visualize_reward)
_REGISTRY.register()
class LEDNetModel(BaseModel):
def __init__(self, opt):
super(LEDNetModel, self).__init__(opt)
self.net_g = build_network(opt['network_g'])
self.init_weights = self.opt['train'].get('init_weights', False)
if self.init_weights:
self.initialize_weights(self.net_g, 0.1)
self.net_g = self.model_to_device(self.net_g)
self.print_network(self.net_g)
load_path = self.opt['path'].get('pretrain_network_g', None)
if (load_path is not None):
param_key = self.opt['path'].get('param_key_g', 'params')
self.load_network(self.net_g, load_path, self.opt['path'].get('strict_load_g', True), param_key)
if self.is_train:
self.init_training_settings()
def initialize_weights(self, net_l, scale=0.1):
if (not isinstance(net_l, list)):
net_l = [net_l]
for net in net_l:
for (n, m) in net.named_modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if (m.bias is not None):
m.bias.data.zero_()
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt['train']
self.ema_decay = train_opt.get('ema_decay', 0)
if (self.ema_decay > 0):
logger = get_root_logger()
logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}')
self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
load_path = self.opt['path'].get('pretrain_network_g', None)
if (load_path is not None):
self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
else:
self.model_ema(0)
self.net_g_ema.eval()
if train_opt.get('pixel_opt'):
self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
else:
self.cri_pix = None
if train_opt.get('perceptual_opt'):
self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
else:
self.cri_perceptual = None
if ((self.cri_pix is None) and (self.cri_perceptual is None)):
raise ValueError('Both pixel and perceptual losses are None.')
self.use_side_loss = train_opt.get('use_side_loss', True)
self.side_loss_weight = train_opt.get('side_loss_weight', 0.8)
self.setup_optimizers()
self.setup_schedulers()
def setup_optimizers(self):
train_opt = self.opt['train']
optim_params = []
for (k, v) in self.net_g.named_parameters():
if v.requires_grad:
optim_params.append(v)
else:
logger = get_root_logger()
logger.warning(f'Params {k} will not be optimized.')
optim_type = train_opt['optim_g'].pop('type')
self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g'])
self.optimizers.append(self.optimizer_g)
def feed_data(self, data):
self.lq = data['lq'].to(self.device)
self.gt = data['gt'].to(self.device)
    def optimize_parameters(self, current_iter):
        """One training step: forward, pixel/perceptual (+ side) losses, backward, step, EMA."""
        self.optimizer_g.zero_grad()
        # The network also returns an auxiliary (side) output for deep supervision.
        (self.side_output, self.output) = self.net_g(self.lq, side_loss=self.use_side_loss)
        if self.use_side_loss:
            # Resize GT down to the side output's spatial size for supervision.
            (h, w) = self.side_output.shape[2:]
            self.side_gt = torch.nn.functional.interpolate(self.gt, (h, w), mode='bicubic', align_corners=False)
        l_total = 0
        loss_dict = OrderedDict()
        if self.cri_pix:
            l_pix = self.cri_pix(self.output, self.gt)
            l_total += l_pix
            loss_dict['l_pix'] = l_pix
            if self.use_side_loss:
                # Side-output pixel loss, down-weighted by side_loss_weight.
                l_side_pix = (self.cri_pix(self.side_output, self.side_gt) * self.side_loss_weight)
                l_total += l_side_pix
                loss_dict['l_side_pix'] = l_side_pix
        if self.cri_perceptual:
            # Only the perceptual term is used; the style term is discarded.
            (l_percep, _) = self.cri_perceptual(self.output, self.gt)
            l_total += l_percep
            loss_dict['l_percep'] = l_percep
            if self.use_side_loss:
                (l_side_percep, _) = self.cri_perceptual(self.side_output, self.side_gt)
                l_side_percep = (l_side_percep * self.side_loss_weight)
                l_total += l_side_percep
                loss_dict['l_side_percep'] = l_side_percep
        l_total.backward()
        self.optimizer_g.step()
        # reduce_loss_dict averages losses across ranks for logging.
        self.log_dict = self.reduce_loss_dict(loss_dict)
        if (self.ema_decay > 0):
            self.model_ema(decay=self.ema_decay)
def test(self):
if (self.ema_decay > 0):
self.net_g_ema.eval()
with torch.no_grad():
self.output = self.net_g_ema(self.lq)
else:
self.net_g.eval()
with torch.no_grad():
self.output = self.net_g(self.lq)
self.net_g.train()
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
if (self.opt['rank'] == 0):
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        """Validate image-by-image: run inference, optionally save SR images, accumulate metrics."""
        dataset_name = dataloader.dataset.opt['name']
        with_metrics = (self.opt['val'].get('metrics') is not None)
        if with_metrics:
            # Running per-metric sums; divided by the image count at the end.
            self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()}
        pbar = tqdm(total=len(dataloader), unit='image')
        for (idx, val_data) in enumerate(dataloader):
            img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
            self.feed_data(val_data)
            self.test()
            visuals = self.get_current_visuals()
            sr_img = tensor2img([visuals['result']])
            if ('gt' in visuals):
                gt_img = tensor2img([visuals['gt']])
                del self.gt
            # Free GPU memory before processing the next sample.
            del self.lq
            del self.output
            torch.cuda.empty_cache()
            if save_img:
                if self.opt['is_train']:
                    save_img_path = osp.join(self.opt['path']['visualization'], img_name, f'{img_name}_{current_iter}.png')
                elif self.opt['val']['suffix']:
                    save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['val']['suffix']}.png")
                else:
                    save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['name']}.png")
                imwrite(sr_img, save_img_path)
            if with_metrics:
                # NOTE(review): gt_img is only assigned when 'gt' is in visuals;
                # a GT-free validation dataset would raise NameError (or reuse a
                # stale image) here — confirm validation sets always provide GT.
                for (name, opt_) in self.opt['val']['metrics'].items():
                    metric_data = dict(img1=sr_img, img2=gt_img)
                    self.metric_results[name] += calculate_metric(metric_data, opt_)
            pbar.update(1)
            pbar.set_description(f'Test {img_name}')
        pbar.close()
        if with_metrics:
            for metric in self.metric_results.keys():
                # idx is the last sample's index, so idx + 1 == number of images.
                self.metric_results[metric] /= (idx + 1)
            self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
    def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
        """Log averaged validation metrics to the logger and, if given, TensorBoard."""
        # Multi-line summary: a header line plus one line per metric.
        log_str = f'''Validation {dataset_name}
'''
        for (metric, value) in self.metric_results.items():
            log_str += f''' # {metric}: {value:.4f}
'''
        logger = get_root_logger()
        logger.info(log_str)
        if tb_logger:
            for (metric, value) in self.metric_results.items():
                tb_logger.add_scalar(f'metrics/{metric}', value, current_iter)
def get_current_visuals(self):
out_dict = OrderedDict()
out_dict['lq'] = self.lq.detach().cpu()
out_dict['result'] = self.output.detach().cpu()
if hasattr(self, 'gt'):
out_dict['gt'] = self.gt.detach().cpu()
return out_dict
def save(self, epoch, current_iter):
if (self.ema_decay > 0):
self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema'])
else:
self.save_network(self.net_g, 'net_g', current_iter)
self.save_training_state(epoch, current_iter) |
def replace_return_docstrings(output_type=None, config_class=None):
    """Decorator factory that replaces the empty ``Returns:`` placeholder in a
    function's docstring with an auto-generated return-value section.

    Raises ValueError when the decorated function's docstring has no
    ``Return:``/``Returns:`` placeholder line.
    """
    def docstring_decorator(fn):
        doc = fn.__doc__
        lines = doc.split('\n')
        # Locate the first line that is exactly an (optionally indented)
        # 'Return:' or 'Returns:' with nothing else on it.
        idx = next((i for i, line in enumerate(lines)
                    if re.search('^\\s*Returns?:\\s*$', line) is not None), None)
        if idx is None:
            raise ValueError(
                f"The function {fn} should have an empty 'Return:' or 'Returns:' in its "
                f'docstring as placeholder, current docstring is:\n{doc}'
            )
        # Preserve the placeholder's indentation in the generated section.
        indent = len(_get_indent(lines[idx]))
        lines[idx] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)
        fn.__doc__ = '\n'.join(lines)
        return fn
    return docstring_decorator
# NOTE(review): the original line was a bare `.parametrize(...)` — invalid
# syntax; the `@pytest.mark` prefix was evidently stripped and is restored.
@pytest.mark.parametrize('supports_correct, expected', [(np.array([0.33]), (- 0.01)), (np.array([0.0]), (- 1.0)), (np.array([1.0]), 1.0)])
def test_exponential_func_multi_class(supports_correct, expected):
    """With 3 classes, competence ranges from -1 (support 0) to 1 (support 1)."""
    n_classes = 3
    result = exponential_func(n_classes, supports_correct)
    assert np.isclose(result, expected, atol=0.01).all()
class ChineseCLIPOnnxConfig(OnnxConfig):
    # ONNX export configuration for ChineseCLIP (text + image dual encoder).
    # NOTE(review): in upstream transformers these accessors are decorated with
    # @property; the decorators appear to have been stripped here — confirm
    # whether callers expect `config.inputs` or `config.inputs()`.
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic-axis spec for input_ids, pixel_values and attention_mask."""
        return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])
    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic-axis spec for the similarity logits and embedding outputs."""
        return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})])
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 0.0001
    def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=(- 1), seq_length: int=(- 1), framework: Optional['TensorType']=None) -> Mapping[(str, Any)]:
        """Build dummy text inputs (tokenizer) and image inputs (feature extractor) and merge them."""
        text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required by this architecture."""
        return 14
class ImagePool():
    """History buffer of previously generated images.

    Returning a 50/50 mix of fresh and buffered images (once the buffer is
    full) reduces discriminator oscillation during GAN training.
    """

    def __init__(self, pool_size):
        # pool_size == 0 disables the pool entirely (query() is a no-op).
        self.pool_size = pool_size
        if pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch in which each image is either the input or a randomly
        chosen buffered one; swapped-out slots are refilled with the input."""
        if self.pool_size == 0:
            return images
        out = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            if self.num_imgs < self.pool_size:
                # Buffer not full yet: store the image and pass it through.
                self.num_imgs += 1
                self.images.append(image)
                out.append(image)
            elif random.uniform(0, 1) > 0.5:
                # Emit a random stored image; keep the new one in its slot.
                slot = random.randint(0, self.pool_size - 1)
                stored = self.images[slot].clone()
                self.images[slot] = image
                out.append(stored)
            else:
                out.append(image)
        return torch.cat(out, 0)
def make_mlp(conf, d_in, d_latent=0, allow_empty=False, **kwargs):
    """Build an MLP from a config node.

    Args:
        conf: config with a 'type' key ('mlp', 'resnet', or 'empty').
        d_in (int): input dimension.
        d_latent (int): latent dimension, appended to d_in for 'mlp'.
        allow_empty (bool): permit type 'empty' (returns None).

    Raises:
        NotImplementedError: for unknown types (or 'empty' when disallowed).
    """
    mlp_type = conf.get_string('type', 'mlp')
    if mlp_type == 'mlp':
        return ImplicitNet.from_conf(conf, d_in + d_latent, **kwargs)
    if mlp_type == 'resnet':
        return ResnetFC.from_conf(conf, d_in, d_latent=d_latent, **kwargs)
    if mlp_type == 'empty' and allow_empty:
        return None
    raise NotImplementedError('Unsupported MLP type')
def get_time_stamp():
    """Return the current local time formatted as 'YYYY-mm-dd HH:MM:SS.mmm'."""
    now = time.time()
    head = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now))
    # Fractional seconds, expressed as zero-padded milliseconds.
    millis = (now - int(now)) * 1000
    return '%s.%03d' % (head, millis)
def _parse_args():
    """Parse CLI arguments; a prediction file is mandatory unless split is 'train'.

    Raises:
        RuntimeError: when --pred_file is missing for a non-train split.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--split', required=True, help='split to operate on')
    parser.add_argument('--pred_file', default=None, help='prediction file')
    args = parser.parse_args()
    if args.split != 'train' and args.pred_file is None:
        raise RuntimeError('A prediction file is required for evaluation and prediction (when split is not Train)')
    print('split', args.split, 'prediction', args.pred_file)
    return args
def schedule(func: Optional[object]=None, wait: int=2, warmup: int=2, active: int=2, repeat: int=1, skip_first: int=0):
    """Attach a torch profiler schedule (wait/warmup/active cycle) to *func*
    via the project's ``set_profiler_attr`` helper."""
    torch_sched = profiler.schedule(
        wait=wait, warmup=warmup, active=active, repeat=repeat, skip_first=skip_first
    )
    return set_profiler_attr(func=func, set_attr='schedule', handler=torch_sched)
def get_args():
    """Parse CLI arguments for the n-fold data-splitting script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--infile', required=True, type=str)
    parser.add_argument('--outdir')
    parser.add_argument('--nfold', default=10, type=int)
    parser.add_argument('--nchar', default=4, type=int)
    parser.add_argument('--seed', default=42, type=int)
    # Boolean feature toggles.
    for flag in ('--log', '--prefix', '--suffix'):
        parser.add_argument(flag, action='store_true')
    return parser.parse_args()
def create_plot(all_data, raw, x_scale, y_scale, xn, yn, fn_out, linestyles, batch):
    """Plot per-algorithm tradeoff curves (metric xn vs yn) and save as EPS."""
    (xm, ym) = (metrics[xn], metrics[yn])
    # X-axis label text is deliberately blanked out.
    xm['description'] = ''
    # Fixed styles for specific algorithms override the provided linestyles.
    hardcode_linestyles = {'USR-LSH': ('orangered', '-', 'o'), 'SB-LSH(Faiss)': ('mediumpurple', '--', 'x'), 'simHash': ('Skyblue', '--', '^')}
    handles = []
    labels = []
    plt.figure(figsize=(12, 9))
    # Sort keys: negated mean log of the y (resp. x) values, so curves are
    # drawn (and listed in the legend) in a stable quality order.
    def mean_y(algo):
        (xs, ys, ls, axs, ays, als) = create_pointset(all_data[algo], xn, yn)
        return (- np.log(np.array(ys)).mean())
    def mean_x(algo):
        (xs, ys, ls, axs, ays, als) = create_pointset(all_data[algo], xn, yn)
        return (- np.log(np.array(xs)).mean())
    (min_x, max_x) = (1, 0)
    for algo in sorted(all_data.keys(), key=mean_x):
        (xs, ys, ls, axs, ays, als) = create_pointset(all_data[algo], xn, yn)
        # Track the data range inside (0, 1) for the logit-scale limits below.
        min_x = min(([min_x] + [x for x in xs if (x > 0)]))
        max_x = max(([max_x] + [x for x in xs if (x < 1)]))
        (color, faded, linestyle, marker) = linestyles[algo]
        if (algo in hardcode_linestyles):
            (color, linestyle, marker) = hardcode_linestyles[algo]
        (handle,) = plt.plot(xs, ys, '-', label=algo, color=color, ms=10, mew=3, lw=3, linestyle=linestyle, marker=marker)
        handles.append(handle)
        if raw:
            # Faded line through all raw points behind the front-line curve.
            (handle2,) = plt.plot(axs, ays, '-', label=algo, color=faded, ms=5, mew=2, lw=2, linestyle=linestyle, marker=marker)
        labels.append(algo)
    ax = plt.gca()
    ax.set_ylabel(ym['description'], fontsize=22, labelpad=8)
    ax.set_xlabel(xm['description'], fontsize=22, labelpad=8)
    if (x_scale[0] == 'a'):
        # Custom 'aN' scale: stretches the region near x == 1 (e.g. recall).
        alpha = float(x_scale[1:])
        fun = (lambda x: (1 - ((1 - x) ** (1 / alpha))))
        inv_fun = (lambda x: (1 - ((1 - x) ** alpha)))
        ax.set_xscale('function', functions=(fun, inv_fun))
        if (alpha <= 3):
            ticks = [inv_fun(x) for x in np.arange(0, 1.2, 0.2)]
            plt.xticks(ticks)
        if (alpha > 3):
            from matplotlib import ticker
            ax.xaxis.set_major_formatter(ticker.LogitFormatter())
            plt.xticks([0, (1 / 2), (1 - 0.1), (1 - 0.01), (1 - 0.001), (1 - 0.0001), 1])
    else:
        ax.set_xscale(x_scale)
    ax.set_yscale(y_scale)
    box = plt.gca().get_position()
    ax.legend(handles, labels, loc='upper right', prop={'size': 26})
    # NOTE(review): Matplotlib renamed grid's `b=` parameter to `visible=` and
    # removed the alias in newer releases — confirm the pinned version.
    plt.grid(b=True, which='major', color='0.65', linestyle='-')
    plt.setp(ax.get_xminorticklabels(), visible=True)
    if (('lim' in xm) and (x_scale != 'logit')):
        (x0, x1) = xm['lim']
        plt.xlim(max(x0, 0), min(x1, 1))
    elif (x_scale == 'logit'):
        plt.xlim(min_x, max_x)
    if ('lim' in ym):
        plt.ylim(ym['lim'])
    # Re-anchor the bottom spine after the custom axis-scale changes.
    ax.spines['bottom']._adjust_location()
    plt.savefig(fn_out, bbox_inches='tight', format='eps')
    plt.close()
def ndcg_score(ground_truth, predictions, k=1):
    """Sum of per-sample NDCG@k over binarized ground-truth labels.

    NOTE(review): this returns the SUM of per-sample scores, not the mean —
    confirm callers expect that. Division by the ideal DCG raises
    ZeroDivisionError for samples whose ideal DCG is 0 — TODO confirm inputs
    guarantee at least one relevant label per sample.
    """
    lb = LabelBinarizer()
    # Binarizer classes span 0..len(predictions); assumes labels fall in that
    # range — TODO confirm against callers.
    lb.fit(range((len(predictions) + 1)))
    T = lb.transform(ground_truth)
    scores = []
    for (y_true, y_score) in zip(T, predictions):
        actual = dcg_score(y_true, y_score, k)
        # Ideal ordering (scored by the truth itself) is the normalizer.
        best = dcg_score(y_true, y_true, k)
        score = (float(actual) / float(best))
        scores.append(score)
    return np.sum(scores)
def write_avg_to_interm_file(out_path, intermediate_file, fold_num, train_scores_list, valid_scores_list, tasks, dataset, h='CV_average'):
    """Append per-metric cross-validation averages to a CSV file.

    Does nothing until exactly ``fold_num`` fold results have been collected.
    For multi-task runs (len(tasks) > 1) the per-model 'averaged' sub-dict is
    used instead of the raw score dict.
    """
    model_name = list(train_scores_list[0].keys())[0]
    n_folds = len(valid_scores_list)
    if n_folds != fold_num:
        return  # not all folds collected yet
    train_sums = {}
    valid_sums = {}
    for train_score, valid_score in zip(train_scores_list, valid_scores_list):
        if len(tasks) > 1:
            train_dict = train_score[model_name]['averaged']
            valid_dict = valid_score[model_name]['averaged']
        else:
            train_dict = train_score[model_name]
            valid_dict = valid_score[model_name]
        for metric in train_dict:
            train_sums[metric] = train_sums.get(metric, 0) + train_dict[metric]
            valid_sums[metric] = valid_sums.get(metric, 0) + valid_dict[metric]
    with open(os.path.join(out_path, intermediate_file), 'a') as f:
        writer = csv.writer(f)
        for metric in train_sums:
            writer.writerow([dataset, model_name, metric,
                             'train', train_sums[metric] / n_folds,
                             'valid', valid_sums[metric] / n_folds,
                             'fold_num', h])
# NOTE(review): the original lines were bare tuples — the click decorator
# names were evidently stripped during extraction. Reconstructed from the
# standard click pattern ('backbone' has no dashes => argument; the rest are
# options); confirm against the upstream script.
@click.command()
@click.argument('backbone', type=str)
@click.option('--imagenet-dir', type=str)
@click.option('-bs', '--batch-size', default=32, type=int)
@click.option('-nw', '--num-workers', default=10, type=int)
@click.option('-gpu', '--gpu/--no-gpu', default=True, is_flag=True)
def main(backbone, imagenet_dir, batch_size, num_workers, gpu):
    """Evaluate a ViT *backbone* on the ImageNet validation split."""
    ptu.set_gpu_mode(gpu)
    cfg = config.load_config()
    cfg = cfg['model'][backbone]
    cfg['backbone'] = backbone
    # The model expects a square (H, W) image size.
    cfg['image_size'] = (cfg['image_size'], cfg['image_size'])
    dataset_kwargs = dict(dataset='imagenet', root_dir=imagenet_dir, image_size=cfg['image_size'], crop_size=cfg['image_size'], patch_size=cfg['patch_size'], batch_size=batch_size, num_workers=num_workers, split='val', normalization=STATS[cfg['normalization']])
    model = create_vit(cfg)
    model.to(ptu.device)
    model.eval()
    eval_dataset(model, dataset_kwargs)
def test_strings_dtype():
    """fit() must reject string labels with a dtype-related ValueError."""
    classifier = SelfTrainingClassifier(KNeighborsClassifier())
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    string_labels = np.take(['one', 'two', 'three'], y)
    with pytest.raises(ValueError, match='dtype'):
        classifier.fit(X, string_labels)
class GradientsInputsCallback(VanillaGradientsCallback):
    # Variant of the vanilla-gradients callback using gradient*input
    # attributions; artifacts go to the 'gradients_inputs' subdirectory.
    explainer = GradientsInputs()
    default_output_subdir = 'gradients_inputs'
def override_qengines(qfunction):
    """Decorator: run *qfunction* once under every supported quantized engine.

    The wrapper discards qfunction's return value (it returns None), matching
    the original contract for test functions.
    """
    import functools

    # Fix: without functools.wraps the wrapper reported __name__ == 'test_fn',
    # which breaks test discovery/reporting for the decorated function.
    @functools.wraps(qfunction)
    def test_fn(*args, **kwargs):
        for qengine in supported_qengines:
            with override_quantized_engine(qengine):
                qfunction(*args, **kwargs)
    return test_fn
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Standard train/val loop (person re-ID style) with optional PCB part
    losses and apex fp16 support.

    Relies on module-level globals: dataloaders, dataset_sizes, opt, use_gpu,
    fp16, amp, version, y_loss, y_err, save_network, draw_curve.
    """
    since = time.time()
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
        print(('-' * 10))
        for phase in ['train', 'val']:
            if (phase == 'train'):
                # NOTE(review): scheduler.step() before optimizer.step() is the
                # pre-1.1 PyTorch ordering — confirm the targeted torch version.
                scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0.0
            for data in dataloaders[phase]:
                (inputs, labels) = data
                (now_batch_size, c, h, w) = inputs.shape
                # Skip the last incomplete batch to keep batch stats stable.
                if (now_batch_size < opt.batchsize):
                    continue
                if use_gpu:
                    inputs = Variable(inputs.cuda().detach())
                    labels = Variable(labels.cuda().detach())
                else:
                    (inputs, labels) = (Variable(inputs), Variable(labels))
                optimizer.zero_grad()
                if (phase == 'val'):
                    with torch.no_grad():
                        outputs = model(inputs)
                else:
                    outputs = model(inputs)
                if (not opt.PCB):
                    (_, preds) = torch.max(outputs.data, 1)
                    loss = criterion(outputs, labels)
                else:
                    # PCB: six part classifiers. Predictions come from the sum
                    # of the six softmax scores; the loss sums all six CE terms.
                    part = {}
                    sm = nn.Softmax(dim=1)
                    num_part = 6
                    for i in range(num_part):
                        part[i] = outputs[i]
                    score = (((((sm(part[0]) + sm(part[1])) + sm(part[2])) + sm(part[3])) + sm(part[4])) + sm(part[5]))
                    (_, preds) = torch.max(score.data, 1)
                    loss = criterion(part[0], labels)
                    for i in range((num_part - 1)):
                        loss += criterion(part[(i + 1)], labels)
                if (phase == 'train'):
                    if fp16:
                        # apex amp loss scaling for mixed precision.
                        with amp.scale_loss(loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()
                    optimizer.step()
                # loss.item() exists from torch 0.4 on; older builds index .data.
                if ((int(version[0]) > 0) or (int(version[2]) > 3)):
                    running_loss += (loss.item() * now_batch_size)
                else:
                    running_loss += (loss.data[0] * now_batch_size)
                running_corrects += float(torch.sum((preds == labels.data)))
            epoch_loss = (running_loss / dataset_sizes[phase])
            epoch_acc = (running_corrects / dataset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            y_loss[phase].append(epoch_loss)
            y_err[phase].append((1.0 - epoch_acc))
            if (phase == 'val'):
                # NOTE(review): last_model_wts is first assigned here, so the
                # final load would fail for num_epochs == 0 — assumed >= 1.
                last_model_wts = model.state_dict()
                if ((epoch % 10) == 9):
                    save_network(model, epoch)
                draw_curve(epoch)
        time_elapsed = (time.time() - since)
        print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
        print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    # Restore the weights captured after the most recent validation phase.
    model.load_state_dict(last_model_wts)
    save_network(model, 'last')
    return model
def CossidentePenttilaGraph(q):
    """Build the Cossidente–Penttila strongly regular graph for odd prime power q.

    The heavy lifting happens inside a GAP program (via the 'grape' package):
    it constructs orbits of a commutator subgroup of SO(3, q^2) and connects
    orbit representatives by a trace-form condition, then returns adjacency
    lists that are converted into a Sage Graph.

    Raises:
        ValueError: if q is not an odd prime power.
    """
    (p, k) = is_prime_power(q, get_data=True)
    if ((not k) or (p == 2)):
        raise ValueError('q(={}) must be an odd prime power'.format(q))
    # The construction requires the optional GAP 'grape' package.
    from sage.features.gap import GapPackage
    GapPackage('grape', spkg='gap_packages').require()
    from sage.libs.gap.libgap import libgap
    adj_list = libgap.function_factory('function(q)\n local z, e, so, G, nu, G1, G0, B, T, s, O1, O2, x;\n LoadPackage("grape");\n G0:=SO(3,q^2);\n so:=GeneratorsOfGroup(G0);\n G1:=Group(Comm(so[1],so[2]),Comm(so[1],so[3]),Comm(so[2],so[3]));\n B:=InvariantBilinearForm(G0).matrix;\n z:=Z(q^2); e:=z; sqo:=(q^2-1)/2;\n if IsInt(sqo/Order(e^2+z^0)) then\n e:=z^First([2..q^2-2], x-> not IsInt(sqo/Order(z^(2*x)+z^0)));\n fi;\n nu:=z^First([0..q^2-2], x->z^x*(e^2+z^0)+(z^x*(e^2+z^0))^q=0*z);\n T:=function(x)\n local r;\n r:=nu*x*B*x;\n return r+r^q;\n end;\n s:=Group([Z(q)*IdentityMat(3,GF(q))]);\n O1:=Orbit(G1, Set(Orbit(s,z^0*[1,0,0])), OnSets);\n O2:=Orbit(G1, Set(Orbit(s,z^0*[1,1,e])), OnSets);\n G:=Graph(G1,Concatenation(O1,O2),OnSets,\n function(x,y) return x<>y and 0*z=T(x[1]+y[1]); end);\n return List([1..OrderGraph(G)],x->Adjacency(G,x));\n end;')
    adj = adj_list(q)
    # GAP uses 1-based vertex indices; shift neighbours down by one.
    G = Graph(((i, int((j - 1))) for (i, ni) in enumerate(adj) for j in ni), format='list_of_edges', multiedges=False)
    G.name((('CossidentePenttila(' + str(q)) + ')'))
    return G
def find_modules(lib: str) -> Tuple[str, str]:
    """Locate the model-lib and quantization module names for *lib*.

    Checks the PyTorch framework tree first, then Keras.

    Returns:
        (model_lib_module, quant_module) dotted module paths.

    Raises:
        Exception: when neither framework provides the library's folder.
    """
    folder = LIB2FOLDER_DICT[lib]
    for framework in ('pytorch_fw', 'keras_fw'):
        if importlib.util.find_spec(f'{framework}.{folder}') is not None:
            return (f'{framework}.{folder}.model_lib', f'{framework}.quant')
    raise Exception(f'Error: model library {lib} is not supported')
def test_is_duplicate_operation(agent: Agent, mocker: MockerFixture):
    """A 'write' is a duplicate only when path+checksum match the recorded
    state; 'append' never is; 'delete' is a duplicate for unknown paths."""
    state = {'path/to/file1.txt': 'checksum1', 'path/to/file2.txt': 'checksum2'}
    mocker.patch.object(file_ops, 'file_operations_state', lambda _: state)
    write_cases = [
        ('write', 'path/to/file1.txt', 'checksum1', True),
        ('write', 'path/to/file1.txt', 'checksum2', False),
        ('write', 'path/to/file3.txt', 'checksum3', False),
        ('append', 'path/to/file1.txt', 'checksum1', False),
    ]
    for op, path, checksum, expected in write_cases:
        assert file_ops.is_duplicate_operation(op, path, agent.config, checksum) is expected
    assert file_ops.is_duplicate_operation('delete', 'path/to/file1.txt', config=agent.config) is False
    assert file_ops.is_duplicate_operation('delete', 'path/to/file3.txt', config=agent.config) is True
def rows_tags(obj):
    """Render *obj* (a mapping or an iterable of rows) as a flat list of
    inline-HTML table tag strings; cell contents come from blocks_tags()."""
    rows = obj.items() if isinstance(obj, dict) else obj
    tags = ['<table style="display:inline-table">']
    for row in rows:
        tags.append('<tr style="padding:0">')
        for cell in row:
            tags.append('<td style="text-align:left; vertical-align:top;padding:1px">')
            tags.extend(blocks_tags(cell))
            tags.append('</td>')
        tags.append('</tr>')
    tags.append('</table>')
    return tags
def get_secrets(num):
    """Create *num* Secret objects paired with the values 0..num-1.

    Returns:
        (secrets, values, mapping) where mapping is {secret: value}.
    """
    secrets = [Secret() for _ in range(num)]
    values = list(range(num))
    return (secrets, values, dict(zip(secrets, values)))
class TextTransform():
    """Bidirectional mapping between lowercase characters and integer labels
    (apostrophe -> 0, space -> 1, a..z -> 2..27), as used for CTC targets."""

    def __init__(self):
        char_map_str = "\n ' 0\n <SPACE> 1\n a 2\n b 3\n c 4\n d 5\n e 6\n f 7\n g 8\n h 9\n i 10\n j 11\n k 12\n l 13\n m 14\n n 15\n o 16\n p 17\n q 18\n r 19\n s 20\n t 21\n u 22\n v 23\n w 24\n x 25\n y 26\n z 27\n "
        self.char_map = {}
        self.index_map = {}
        for entry in char_map_str.strip().split('\n'):
            symbol, index_text = entry.split()
            index = int(index_text)
            self.char_map[symbol] = index
            self.index_map[index] = symbol
        # Decode index 1 straight to a space so int_to_text round-trips.
        self.index_map[1] = ' '

    def text_to_int(self, text):
        """Map a lowercase string to its integer label sequence."""
        return [self.char_map['<SPACE>'] if c == ' ' else self.char_map[c]
                for c in text]

    def int_to_text(self, labels):
        """Map a sequence of integer labels back to a string."""
        return ''.join(self.index_map[i] for i in labels).replace('<SPACE>', ' ')
class TestCEM(TfGraphTestCase):
    """Smoke-test CEM on CartPole-v1: mean return after 10 epochs must exceed 40."""

    # NOTE(review): the original line was a bare `.large` — the `@pytest.mark`
    # prefix was evidently stripped and is restored here.
    @pytest.mark.large
    def test_cem_cartpole(self):
        with LocalTFRunner(snapshot_config) as runner:
            env = GarageEnv(env_name='CartPole-v1')
            policy = CategoricalMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            n_samples = 10
            algo = CEM(env_spec=env.spec, policy=policy, baseline=baseline, best_frac=0.1, max_path_length=100, n_samples=n_samples)
            runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
            rtn = runner.train(n_epochs=10, batch_size=2048)
            assert (rtn > 40)
            env.close()
def _softmax(raw, input, dim=None, _stacklevel=3):
    """PyTorch->Caffe conversion hook: run the real softmax, then record an
    equivalent Caffe Softmax layer in the global conversion log."""
    x = raw(input, dim=dim)
    if (dim is None):
        # Mirror PyTorch's implicit dim selection so the Caffe axis matches.
        dim = F._get_softmax_dim('softmax', input.dim(), _stacklevel)
    bottom_blobs = [log.blobs(input)]
    name = log.add_layer(name='softmax')
    # Register the output tensor so later layers can reference it as a blob.
    log.add_blobs([x], name='softmax_blob')
    layer = caffe_net.Layer_param(name=name, type='Softmax', bottom=bottom_blobs, top=[log.blobs(x)])
    layer.param.softmax_param.axis = dim
    log.cnet.add_layer(layer)
    return x
class Tarball(object):
    """An upstream tarball referenced by a Sage package, possibly cached in
    SAGE_DISTFILES.

    Fix: ``filename``, ``package`` and ``upstream_fqn`` are read without
    parentheses throughout this class (``__repr__``, ``__eq__``,
    ``checksum_verifies``, ``download``, ``save_as``) but were defined as plain
    methods — comparing/printing bound methods instead of values. The
    ``@property`` decorators (as in upstream sage_bootstrap) are restored.
    """

    def __init__(self, tarball_name, package=None):
        """Resolve (or verify) the package that references *tarball_name*.

        Raises:
            ValueError: when no package (or not the supplied *package*)
                references the tarball.
        """
        self.__filename = tarball_name
        if (package is None):
            self.__package = None
            for pkg in Package.all():
                if (pkg.tarball_filename == tarball_name):
                    self.__package = pkg.tarball_package
            if (self.package is None):
                error = 'tarball {0} is not referenced by any Sage package'.format(tarball_name)
                log.error(error)
                raise ValueError(error)
        else:
            self.__package = package
            if (package.tarball_filename != tarball_name):
                error = 'tarball {0} is not referenced by the {1} package'.format(tarball_name, package.name)
                log.error(error)
                raise ValueError(error)

    def __repr__(self):
        return 'Tarball {0}'.format(self.filename)

    @property
    def filename(self):
        """The tarball filename (no directory component)."""
        return self.__filename

    @property
    def package(self):
        """The Package instance that references this tarball."""
        return self.__package

    @property
    def upstream_fqn(self):
        """Absolute path of the (possibly not yet downloaded) local copy."""
        return os.path.join(SAGE_DISTFILES, self.filename)

    def __eq__(self, other):
        return (self.filename == other.filename)

    def _compute_hash(self, algorithm):
        # Stream the file in 1 MiB chunks to bound memory use.
        with open(self.upstream_fqn, 'rb') as f:
            while True:
                buf = f.read(1048576)
                if (not buf):
                    break
                algorithm.update(buf)
        return algorithm.hexdigest()

    def _compute_sha1(self):
        import hashlib
        return self._compute_hash(hashlib.sha1())

    def _compute_md5(self):
        import hashlib
        return self._compute_hash(hashlib.md5())

    def _compute_cksum(self):
        from sage_bootstrap.cksum import CksumAlgorithm
        return self._compute_hash(CksumAlgorithm())

    def checksum_verifies(self):
        """True when the local copy's SHA1 matches the package's checksum."""
        sha1 = self._compute_sha1()
        return (sha1 == self.package.sha1)

    def is_distributable(self):
        """Whether this tarball may be mirrored/redistributed."""
        return ('do-not-distribute' not in self.filename)

    def download(self, allow_upstream=False):
        """Fetch the tarball into SAGE_DISTFILES, trying all mirrors first.

        Raises:
            ValueError: for packages without a tarball.
            FileNotMirroredError: when no mirror (nor, if allowed, the
                upstream URL) has the file.
            ChecksumError: when the downloaded file fails verification.
        """
        if (not self.filename):
            raise ValueError('non-normal package does define a tarball, so cannot download')
        destination = self.upstream_fqn
        if os.path.isfile(destination):
            if self.checksum_verifies():
                log.info('Using cached file {destination}'.format(destination=destination))
                return
            else:
                log.warning('Invalid checksum; ignoring cached file {destination}'.format(destination=destination))
        successful_download = False
        log.info('Attempting to download package {0} from mirrors'.format(self.filename))
        for mirror in MirrorList():
            url = mirror.replace('${SPKG}', self.package.name)
            if (not url.endswith('/')):
                url += '/'
            url += self.filename
            log.info(url)
            try:
                Download(url, destination).run()
                successful_download = True
                break
            except IOError:
                log.debug('File not on mirror')
        if (not successful_download):
            # Fall back to the upstream URL only when explicitly allowed.
            url = self.package.tarball_upstream_url
            if (allow_upstream and url):
                log.info('Attempting to download from {}'.format(url))
                try:
                    Download(url, destination).run()
                except IOError:
                    raise FileNotMirroredError('tarball does not exist on mirror network and neither at the upstream URL')
            else:
                raise FileNotMirroredError('tarball does not exist on mirror network')
        if (not self.checksum_verifies()):
            raise ChecksumError('checksum does not match')

    def save_as(self, destination):
        """Copy the cached tarball to *destination*."""
        import shutil
        shutil.copy(self.upstream_fqn, destination)
# NOTE(review): this bare call looks like a stripped decorator — taichi's
# test suite uses `@test_utils.test()` above each test; confirm upstream.
_utils.test()
def test_nested_loops():
    # Dense 2048x2048 i32 field.
    x = ti.field(ti.i32)
    n = 2048
    ti.root.dense(ti.ij, n).place(x)
    # NOTE(review): `paint` is presumably a taichi kernel (an `@ti.kernel`
    # decorator appears to have been stripped); as plain Python this nested
    # loop just writes x[0, 0] = i n*n times.
    def paint():
        for i in range(n):
            for j in range(n):
                x[(0, 0)] = i
    paint()
class DummyDataset(torch.utils.data.Dataset):
    """Trivial map-style dataset over the integers 0..9 (tensor indices allowed)."""

    def __init__(self):
        self.data = list(range(10))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Fix: the original asserted `self.start == 0`, but `start` is never
        # defined on this class, so every indexed access raised AttributeError.
        # Tolerate an externally-injected `start` while defaulting to 0.
        assert getattr(self, 'start', 0) == 0
        return self.data[idx]
# NOTE(review): the original line was a bare `.skipif(...)` — the
# `@pytest.mark` prefix was evidently stripped and is restored.
@pytest.mark.skipif((not require_gpu), reason='STELLARGRAPH_MUST_USE_GPU is not set to 1, so a GPU does not have to be used')
def test_on_gpu_when_requested():
    """When a GPU is required, matmul must run with a GPU device present."""
    tf.debugging.set_log_device_placement(True)
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)
    assert (c.numpy().shape == (2, 2))
    assert tf.config.list_physical_devices('GPU')
class SolcError(Exception):
    """Raised when a solc invocation fails; carries the command, exit code and
    captured stdio for diagnostics."""

    message = 'An error occurred during execution'

    def __init__(self, message: str=None, command: List=None, return_code: int=None, stdin_data: str=None, stdout_data: str=None, stderr_data: str=None, error_dict: Dict=None) -> None:
        if message is not None:
            # Override the class-level default message.
            self.message = message
        self.command = command or []
        self.return_code = return_code
        self.stdin_data = stdin_data
        self.stderr_data = stderr_data
        self.stdout_data = stdout_data
        self.error_dict = error_dict

    def __str__(self) -> str:
        command_text = ' '.join(str(part) for part in self.command)
        report = (
            f'{self.message}\n'
            f'> command: `{command_text}`\n'
            f'> return code: `{self.return_code}`\n'
            '> stdout:\n'
            f'{self.stdout_data}\n'
            '> stderr:\n'
            f'{self.stderr_data}'
        )
        return report.strip()
def main():
    """Entry point: load the config, set the terminal title, run the experiment."""
    config = get_config()
    handler = ExperimentHandler(config)
    # OSC escape sequence: sets the terminal window title to the experiment name.
    print('\x1b]2;%s\x1b\\' % config['experiment_name'])
    handler.run()
    print('Experiment concluded.')
class QuantizePerTensorBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark per-tensor quantization ('Q') or dequantization ('D') of a
    random (C, M, N) float tensor."""

    def init(self, C, M, N, dtype, mode):
        assert mode in ('Q', 'D')
        sample = torch.rand(C, M, N)
        quantizer = nnq.Quantize(scale=1.0, zero_point=0, dtype=dtype)
        self.input = sample
        self.dtype = dtype
        self.op = quantizer
        self.set_module_name('QuantizePerTensor')
        if mode == 'D':
            # Pre-quantize the input so forward() measures dequantization only.
            self.input = quantizer(sample)
            self.op = nnq.DeQuantize()
            self.set_module_name('DequantizePerTensor')

    def forward(self):
        return self.op(self.input)
def add_flops_counter_variable_or_reset(module):
    """Attach (or reset to zero) the __flops__ counter on supported modules."""
    if not is_supported_instance(module):
        return
    module.__flops__ = 0
class SkylineServer():
    """Socket server wiring together connection handling, message routing and
    analysis requests for the Skyline profiler.

    Every callback is funnelled through a one-worker ThreadPoolExecutor, so
    message handling and connection bookkeeping run serialised on one thread.
    """
    def __init__(self, host, port):
        self._requested_host = host
        self._requested_port = port
        # Accepts raw TCP connections and hands new sockets to the manager.
        self._connection_acceptor = ConnectionAcceptor(self._requested_host, self._requested_port, self._on_new_connection)
        self._connection_manager = ConnectionManager(self._on_message, self._on_connection_closed)
        self._message_sender = MessageSender(self._connection_manager)
        self._analysis_request_manager = AnalysisRequestManager(self._submit_work, self._message_sender, self._connection_manager)
        self._message_handler = MessageHandler(self._connection_manager, self._message_sender, self._analysis_request_manager)
        # Single worker => all submitted handlers execute sequentially.
        self._main_executor = ThreadPoolExecutor(max_workers=1)
    def __enter__(self):
        # Context-manager support: `with SkylineServer(...) as s:` starts/stops it.
        self.start()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()
    def start(self):
        self._analysis_request_manager.start()
        self._connection_acceptor.start()
        logger.debug('Skyline server has started.')
    def stop(self):
        # Socket teardown runs on the executor thread; .result() blocks until done.
        def shutdown():
            self._connection_acceptor.stop()
            self._connection_manager.stop()
        self._analysis_request_manager.stop()
        self._main_executor.submit(shutdown).result()
        self._main_executor.shutdown()
        logger.debug('Skyline server has shut down.')
    def listening_on(self):
        # (host, port) actually bound by the acceptor (port may differ from
        # the requested one when 0 was requested).
        return (self._connection_acceptor.host, self._connection_acceptor.port)
    def _on_message(self, data, address):
        self._main_executor.submit(self._message_handler.handle_message, data, address)
    def _on_new_connection(self, socket, address):
        self._main_executor.submit(self._connection_manager.register_connection, socket, address)
    def _on_connection_closed(self, address):
        self._main_executor.submit(self._connection_manager.remove_connection, address)
    def _submit_work(self, func, *args, **kwargs):
        # Generic hook given to the analysis manager to schedule work serially.
        self._main_executor.submit(func, *args, **kwargs)
def update_params(batch, i_iter):
    """One GAIL iteration: fit the discriminator on policy vs expert samples,
    then run several PPO epochs on the collected on-policy batch.

    Relies on module-level globals: policy_net, value_net, discrim_net, the
    optimizers, expert_traj, args, dtype, device, optim_epochs,
    optim_batch_size, ones/zeros/LongTensor helpers.
    """
    states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)
    actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)
    rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)
    masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)
    with torch.no_grad():
        values = value_net(states)
        # Log-probs under the pre-update policy, used for the PPO ratio.
        fixed_log_probs = policy_net.get_log_prob(states, actions)
    'get advantage estimation from the trajectories'
    (advantages, returns) = estimate_advantages(rewards, masks, values, args.gamma, args.tau, device)
    # Discriminator update: policy pairs labelled 1, expert pairs labelled 0.
    for _ in range(1):
        expert_state_actions = torch.from_numpy(expert_traj).to(dtype).to(device)
        g_o = discrim_net(torch.cat([states, actions], 1))
        e_o = discrim_net(expert_state_actions)
        optimizer_discrim.zero_grad()
        discrim_loss = (discrim_criterion(g_o, ones((states.shape[0], 1), device=device)) + discrim_criterion(e_o, zeros((expert_traj.shape[0], 1), device=device)))
        discrim_loss.backward()
        optimizer_discrim.step()
    'perform mini-batch PPO update'
    optim_iter_num = int(math.ceil((states.shape[0] / optim_batch_size)))
    for _ in range(optim_epochs):
        # Reshuffle the whole batch each epoch so minibatches differ.
        perm = np.arange(states.shape[0])
        np.random.shuffle(perm)
        perm = LongTensor(perm).to(device)
        (states, actions, returns, advantages, fixed_log_probs) = (states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), fixed_log_probs[perm].clone())
        for i in range(optim_iter_num):
            ind = slice((i * optim_batch_size), min(((i + 1) * optim_batch_size), states.shape[0]))
            (states_b, actions_b, advantages_b, returns_b, fixed_log_probs_b) = (states[ind], actions[ind], advantages[ind], returns[ind], fixed_log_probs[ind])
            ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, 1, states_b, actions_b, returns_b, advantages_b, fixed_log_probs_b, args.clip_epsilon, args.l2_reg)
# NOTE(review): the original first line was a bare `(frozen=True)` — the
# `@dataclasses.dataclass` decorator name was evidently stripped (the body
# already uses dataclasses.field); restored.
@dataclasses.dataclass(frozen=True)
class MetricInfo:
    """Immutable description of a distance metric and its implementations."""
    canonical_name: str
    aka: set[str]                      # accepted aliases for the metric name
    dist_func: Callable                # pairwise distance between two vectors
    cdist_func: Callable               # cross-distance matrix implementation
    pdist_func: Callable               # condensed pairwise-distance implementation
    validator: Optional[Callable] = None   # optional input validator/converter
    types: list[str] = dataclasses.field(default_factory=(lambda : ['double']))
    requires_contiguous_out: bool = True
def register_Ns3SimpleOfdmSendParam_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::simpleOfdmSendParam.

    Auto-generated binding description: copy/default constructors, two
    full-parameter constructors (FEC-block and packet-burst variants), and
    getters/setters for burst, burst size, direction, FEC block, frequency,
    first-block flag, modulation type and RX power.
    """
    cls.add_constructor([param('ns3::simpleOfdmSendParam const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::bvec const &', 'fecBlock'), param('uint32_t', 'burstSize'), param('bool', 'isFirstBlock'), param('uint64_t', 'Frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'rxPowerDbm')])
    cls.add_constructor([param('uint32_t', 'burstSize'), param('bool', 'isFirstBlock'), param('uint64_t', 'Frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'rxPowerDbm'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    cls.add_method('GetBurst', 'ns3::Ptr< ns3::PacketBurst >', [])
    cls.add_method('GetBurstSize', 'uint32_t', [])
    cls.add_method('GetDirection', 'uint8_t', [])
    cls.add_method('GetFecBlock', 'ns3::bvec', [])
    cls.add_method('GetFrequency', 'uint64_t', [])
    cls.add_method('GetIsFirstBlock', 'bool', [])
    cls.add_method('GetModulationType', 'ns3::WimaxPhy::ModulationType', [])
    cls.add_method('GetRxPowerDbm', 'double', [])
    cls.add_method('SetBurstSize', 'void', [param('uint32_t', 'burstSize')])
    cls.add_method('SetDirection', 'void', [param('uint8_t', 'direction')])
    cls.add_method('SetFecBlock', 'void', [param('ns3::bvec const &', 'fecBlock')])
    cls.add_method('SetFrequency', 'void', [param('uint64_t', 'Frequency')])
    cls.add_method('SetIsFirstBlock', 'void', [param('bool', 'isFirstBlock')])
    cls.add_method('SetModulationType', 'void', [param('ns3::WimaxPhy::ModulationType', 'modulationType')])
    cls.add_method('SetRxPowerDbm', 'void', [param('double', 'rxPowerDbm')])
    return
def ocp_ksp(F, bcs, J, y, u, p, config_ocp, ksp_options):
    """Thin factory wrapper: build a cashocs OptimalControlProblem from the
    weak form *F*, boundary conditions *bcs*, cost functional *J*, state *y*,
    control *u*, adjoint *p*, the OCP config, and Krylov-solver options."""
    return cashocs.OptimalControlProblem(F, bcs, J, y, u, p, config=config_ocp, ksp_options=ksp_options)
.parametrize('observation_shape', [(4, 84, 84)])
def test_pixel_observation_scaler(observation_shape: Sequence[int]) -> None:
    """Round-trip check for PixelObservationScaler: transform divides uint8
    pixels by 255, reverse_transform restores them, and serialization works."""
    scaler = PixelObservationScaler()
    raw = torch.randint(high=255, size=observation_shape)
    scaled = scaler.transform(raw)
    assert torch.all(scaled == raw.float() / 255.0)
    assert scaler.get_type() == 'pixel'
    assert torch.all(scaler.reverse_transform(scaled) == raw)
    assert scaler.built
    # Serialization must survive a round trip without raising.
    PixelObservationScaler.deserialize(scaler.serialize())
def check_kind_cluster() -> None:
    """Ensure a local ``kind`` Kubernetes cluster named 'kind' exists.

    Queries ``kind get clusters`` and creates the cluster when missing;
    failures (kind not installed, non-zero exit) are logged, not raised.
    """
    try:
        completed = subprocess.run(
            ['kind', 'get', 'clusters'],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
        existing = set(completed.stdout.decode('utf-8').split())
        if 'kind' not in existing:
            logging.info('Creating kind cluster...')
            create_kind_cluster()
    except (subprocess.CalledProcessError, FileNotFoundError) as err:
        logging.error('Cannot check kind cluster, reason: {}'.format(err))
def convert_datasets(train: List, valid: List, test: List, subj_index_mapper: IndexMapper, obj_index_mapper: IndexMapper, rel_index_mapper: IndexMapper, triple_format_parser=(lambda x: x.strip().split('\t')), subj_slot=0, rel_slot=1, obj_slot=2, filter_unseen=False, filter_func=None, segment=False):
    """Convert raw knowledge-graph triple datasets (text lines) to index form.

    Builds the vocabularies of the three IndexMappers, converts every triple
    to ids, and filters the converted triples with ``filter_func``.

    Returns:
        Three ``filter`` iterators (train, valid, test) of converted triples;
        when ``segment`` is true, additionally two OrderedDicts mapping entity
        and relation ids to their segmented token-id sequences.
    """
    if (not filter_func):
        # NOTE(review): despite the name ``max_number_of_unknowns``, the inner
        # sum counts tokens that are NOT UNK; the default filter therefore
        # keeps triples in which no component consists entirely of UNK tokens
        # — confirm this matches the intended semantics.
        max_number_of_unknowns = 0
        filter_func = (lambda i: (sum([(1 if (sum([(1 if (UNK != k) else 0) for k in j]) <= max_number_of_unknowns) else 0) for j in i]) == 0))
    idx_mappers = [subj_index_mapper, rel_index_mapper, obj_index_mapper]
    for idx_mapper in idx_mappers:
        idx_mapper.init_vocab()
    # Vocabulary is collected from train only when unseen entities/relations
    # should be filtered from valid/test; otherwise from all three splits.
    datasets_for_collecting_vocab = [train]
    if (not filter_unseen):
        datasets_for_collecting_vocab.extend([valid, test])
    for data in datasets_for_collecting_vocab:
        for x in data:
            x = triple_format_parser(x)
            subj_index_mapper.collect_vocab(x[subj_slot])
            rel_index_mapper.collect_vocab(x[rel_slot])
            obj_index_mapper.collect_vocab(x[obj_slot])
    for idx_mapper in idx_mappers:
        idx_mapper.finalize_vocab()
    def convert_data_to_idx(data):
        # NOTE(review): ``toidx`` appears to return a pair (id, segmented
        # token ids); zip(*) transposes the three pairs into
        # ((s_id, r_id, o_id), (s_seg, r_seg, o_seg)) — confirm against
        # IndexMapper.toidx.
        return [list(zip(*(subj_index_mapper.toidx(s), rel_index_mapper.toidx(r), obj_index_mapper.toidx(o)))) for (s, r, o) in map(triple_format_parser, data)]
    train_converted = convert_data_to_idx(train)
    valid_converted = convert_data_to_idx(valid)
    test_converted = convert_data_to_idx(test)
    if segment:
        # Build id -> token-id-sequence lookup tables over all three splits.
        entity_id_token_ids_map = OrderedDict()
        relation_id_token_ids_map = OrderedDict()
        for triple__id_and_segmented in ((train_converted + valid_converted) + test_converted):
            (triple, triple_segmented) = triple__id_and_segmented
            entity_id_token_ids_map[triple[subj_slot][0]] = triple_segmented[subj_slot]
            relation_id_token_ids_map[triple[rel_slot][0]] = triple_segmented[rel_slot]
            entity_id_token_ids_map[triple[obj_slot][0]] = triple_segmented[obj_slot]
        return (filter(filter_func, map(itemgetter(0), train_converted)), filter(filter_func, map(itemgetter(0), valid_converted)), filter(filter_func, map(itemgetter(0), test_converted)), entity_id_token_ids_map, relation_id_token_ids_map)
    else:
        return (filter(filter_func, map(itemgetter(0), train_converted)), filter(filter_func, map(itemgetter(0), valid_converted)), filter(filter_func, map(itemgetter(0), test_converted)))
def call_intersphinx(app, env, node, contnode):
    """Try to resolve a missing Sphinx reference via intersphinx.

    Links that point into the local Sage documentation tree (SAGE_DOC) are
    rewritten from absolute URIs to URIs relative to the referring page, so
    the built documentation stays relocatable.

    Returns the resolved reference node, or None if intersphinx found nothing.
    """
    debug_inf(app, ('???? Trying intersphinx for %s' % node['reftarget']))
    builder = app.builder
    res = intersphinx.missing_reference(app, env, node, contnode)
    if res:
        # Relativize URIs under SAGE_DOC against the directory of the page
        # containing the reference.
        if res['refuri'].startswith(SAGE_DOC):
            here = os.path.dirname(os.path.join(builder.outdir, node['refdoc']))
            res['refuri'] = os.path.relpath(res['refuri'], here)
            debug_inf(app, ('++++ Found at %s' % res['refuri']))
    else:
        debug_inf(app, ('---- Intersphinx: %s not Found' % node['reftarget']))
    return res
class KleshchevCrystalMixin():
    """Mixin providing crystal-operator statistics for Kleshchev
    (multi)partition crystals, expressed through normal/conormal cells
    supplied by the concrete element class.
    """
    def epsilon(self, i):
        """epsilon_i of self: the number of i-normal cells."""
        return len(self.normal_cells(i))
    def phi(self, i):
        """phi_i of self: the number of i-conormal cells."""
        return len(self.conormal_cells(i))
    def Epsilon(self):
        """Sum over i of epsilon_i(self) * Lambda_i in the parent's weight
        lattice realization."""
        P = self.parent()
        WLR = P.weight_lattice_realization()
        La = WLR.fundamental_weights()
        n = self.normal_cells()
        return WLR.sum(((len(n[i]) * La[i]) for i in P.index_set() if (i in n)))
    def Phi(self):
        """Sum over i of phi_i(self) * Lambda_i in the parent's weight
        lattice realization."""
        P = self.parent()
        WLR = P.weight_lattice_realization()
        La = WLR.fundamental_weights()
        c = self.conormal_cells()
        return WLR.sum(((len(c[i]) * La[i]) for i in P.index_set() if (i in c)))
    def weight(self):
        """Weight of self: the sum of Lambda_{r} over the parent's multicharge
        minus one simple root alpha_{content(c)} for every cell c."""
        WLR = self.parent().weight_lattice_realization()
        alpha = WLR.simple_roots()
        La = WLR.fundamental_weights()
        r = self.parent()._multicharge
        wt = WLR.sum((La[ZZ(x)] for x in r))
        return (wt - WLR.sum((alpha[self.content(*c, multicharge=r)] for c in self.cells())))
(Output('topic-table', 'data'), [Input('topic-data', 'data')])
def get_topic_words(data):
    """Dash callback body: tabulate each topic's name with its top-15 words,
    sorted by topic name, as a list of records for a dash DataTable."""
    words = get_top_n_words(data['topics'], n=15)
    names = [topic['name'] for topic in data['topics'].values()]
    table = pd.DataFrame.from_dict({'topic_names': names, 'topic_words': words})
    table = table.sort_values(by='topic_names')
    return table.to_dict(orient='records')
class ReversePseudoFP16Initializer(Initializer):
    """Caffe2 initializer keeping a float32 master parameter with a float16
    shadow copy (the reverse of PseudoFP16Initializer, where fp16 is primary).
    """
    def update(self, operator_name, kwargs):
        """Record the fill operator name (e.g. 'XavierFill') and its kwargs.
        May only be set once; a second call raises."""
        if (self.operator_name is not None):
            raise Exception('Operator name overwrites are not allowed')
        self.operator_name = operator_name
        self.operator_kwargs = kwargs
    def create_param(self, param_name, init_net, shape):
        """Append init ops to *init_net*: fill the fp32 blob, then cast a
        '<param_name>_fp16' half copy. Returns ParameterInfo whose primary
        blob is the fp32 one, with the fp16 copy in blob_copy."""
        param_fp32 = init_net.__getattr__(self.operator_name)([], param_name, shape=shape, **self.operator_kwargs)
        param_fp16 = init_net.FloatToHalf(param_fp32, (param_name + '_fp16'))
        return ParameterInfo(param_id=None, param=param_fp32, shape=shape, blob_copy={DataType.FLOAT16: param_fp16})
def register_functions(root_module):
    """Register free functions on the PyBindGen *root_module* and recurse
    into the nested ns-3 C++ namespaces (auto-generated binding code)."""
    module = root_module
    module.add_function('MakePriomapChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    # Delegate registration for each nested namespace.
    register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
    register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
    register_functions_ns3_internal(module.add_cpp_namespace('internal'), root_module)
    return
def gather_metadata() -> Dict:
    """Collect best-effort run metadata.

    Returns:
        dict with keys: date_start (formatted timestamp), date_end (None,
        filled in later), successful (False until the run finishes), git
        (commit/branch/dirty/path, or None outside a git checkout), slurm
        (lower-cased SLURM env vars, or None outside SLURM), env (a copy of
        the full process environment).
    """
    date_start = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    try:
        repo = git.Repo(search_parent_directories=True)
        git_data = dict(
            commit=repo.commit().hexsha,
            branch=repo.active_branch.name,
            is_dirty=repo.is_dirty(),
            path=repo.git_dir,
        )
    except Exception:
        # Git metadata is best-effort (not a checkout, detached HEAD, gitpython
        # missing). Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit — narrowed to Exception.
        git_data = None
    if ('SLURM_JOB_ID' in os.environ):
        # Strip the SLURM_/SLURMD_ prefixes and lower-case the keys.
        slurm_data = {}
        for k in [k for k in os.environ if k.startswith('SLURM')]:
            d_key = k.replace('SLURM_', '').replace('SLURMD_', '').lower()
            slurm_data[d_key] = os.environ[k]
    else:
        slurm_data = None
    return dict(date_start=date_start, date_end=None, successful=False, git=git_data, slurm=slurm_data, env=os.environ.copy())
def get_examples(data_dir, set_type):
    """Load RACE-style multiple-choice reading-comprehension examples.

    ``set_type`` is e.g. 'train' or 'train-middle'; a '-level' suffix
    restricts reading to that difficulty level, otherwise both 'middle' and
    'high' directories are read. Every question becomes one InputExample
    holding the normalized passage, four question+option strings, and the
    gold answer index (0-3).
    """
    examples = []
    parts = set_type.split('-')
    if len(parts) == 2:
        set_type, levels = parts[0], [parts[1]]
    else:
        levels = ['middle', 'high']
    for level in levels:
        level_dir = os.path.join(data_dir, set_type, level)
        for filename in os.listdir(level_dir):
            with open(os.path.join(level_dir, filename), 'r') as fh:
                record = json.load(fh)
            # Normalize the passage to single-space-separated text.
            passage = re.sub('\\s+', ' ', record['article'].replace('\n', ' '))
            for idx, answer in enumerate(record['answers']):
                label = ord(answer) - ord('A')  # 'A'..'D' -> 0..3
                question = record['questions'][idx]
                qa_list = []
                for j in range(4):
                    option = record['options'][idx][j]
                    # Cloze questions get the option substituted into the
                    # blank; otherwise the option is appended.
                    if '_' in question:
                        merged = question.replace('_', option)
                    else:
                        merged = ' '.join([question, option])
                    qa_list.append(re.sub('\\s+', ' ', merged))
                examples.append(InputExample(passage, qa_list, label))
    return examples
def hash_file(path, blocksize=(1 << 20)):
    """Stream the file at *path* through SHA-256 in *blocksize* chunks.

    Returns:
        (hasher, byte_count): the hashlib object (call .hexdigest() on it)
        and the total number of bytes read.
    """
    digest = hashlib.sha256()
    total = 0
    with open(path, 'rb') as stream:
        for chunk in read_chunks(stream, size=blocksize):
            digest.update(chunk)
            total += len(chunk)
    return (digest, total)
class ResNet_Atrous(nn.Module):
    """ResNet backbone with atrous (dilated) convolutions, DeepLab-style.

    Args:
        block: residual block class; must expose ``expansion`` (the 4x
            channel factors below imply a Bottleneck-like block).
        layers: per-stage block counts for layer1..layer4 (layer5-7 reuse
            ``layers[3]``).
        atrous: dilation multiplier(s) for the deep layers; scaled by 16//os.
        os: output stride between input and final feature map; 8 or 16 only.
    """
    def __init__(self, block, layers, atrous=None, os=16):
        super(ResNet_Atrous, self).__init__()
        # Strides of layer2..layer4, chosen so total downsampling matches os.
        stride_list = None
        if (os == 8):
            stride_list = [2, 1, 1]
        elif (os == 16):
            stride_list = [2, 2, 1]
        else:
            raise ValueError(('resnet_atrous.py: output stride=%d is not supported.' % os))
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(64, momentum=0.0003)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, 64, layers[0])
        self.layer2 = self._make_layer(block, 256, 128, layers[1], stride=stride_list[0])
        # Dilations scale with 16//os so receptive fields stay comparable
        # across output strides.
        self.layer3 = self._make_layer(block, 512, 256, layers[2], stride=stride_list[1], atrous=(16 // os))
        self.layer4 = self._make_layer(block, 1024, 512, layers[3], stride=stride_list[2], atrous=[((item * 16) // os) for item in atrous])
        # NOTE(review): layer5-7 are extra stride-1 dilated stages repeating
        # the layer4 configuration; `atrous` must be iterable here (an int
        # would break the comprehension) — confirm expected usage.
        self.layer5 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[((item * 16) // os) for item in atrous])
        self.layer6 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[((item * 16) // os) for item in atrous])
        self.layer7 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[((item * 16) // os) for item in atrous])
        # Intermediate feature maps recorded during forward().
        self.layers = []
        # Standard He init for convs; BN weights/biases to 1/0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def get_layers(self):
        """Return the feature maps captured by the most recent forward()."""
        return self.layers
    def _make_layer(self, block, inplanes, planes, blocks, stride=1, atrous=None):
        """Build one residual stage of ``blocks`` blocks.

        ``inplanes`` is passed explicitly by the constructor (self.inplanes
        is updated below but not read by those call sites). ``atrous`` may be
        None (no dilation), an int (uniform), or a per-block list.
        """
        downsample = None
        if (atrous == None):
            atrous = ([1] * blocks)
        elif isinstance(atrous, int):
            atrous_list = ([atrous] * blocks)
            atrous = atrous_list
        # Projection shortcut when spatial size or channel count changes.
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, dilation=atrous[0], bias=False), SynchronizedBatchNorm2d((planes * block.expansion), momentum=0.0003))
        layers = []
        layers.append(block(inplanes, planes, stride=stride, atrous=atrous[0], downsample=downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block((planes * block.expansion), planes, stride=1, atrous=atrous[i]))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Run the backbone; records the outputs of layer1, layer2, layer3
        and layer7 in self.layers (see get_layers()) and returns the final
        feature map."""
        self.layers = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        self.layers.append(x)
        x = self.layer2(x)
        self.layers.append(x)
        x = self.layer3(x)
        self.layers.append(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        self.layers.append(x)
        return x
class Adagrad(Optimizer):
    """Adagrad optimizer (legacy Keras/Theano style).

    Per-parameter accumulators of squared gradients adaptively scale the
    learning rate for each parameter.

    Args:
        lr: learning rate (wrapped in a shared scalar so it can be changed).
        epsilon: numerical-stability term added inside the sqrt.
    """
    def __init__(self, lr=0.01, epsilon=1e-06, *args, **kwargs):
        super(Adagrad, self).__init__(**kwargs)
        # Stores every local (lr, epsilon, args, kwargs, self) as an instance
        # attribute; get_updates/get_config rely on self.epsilon set this way.
        self.__dict__.update(locals())
        self.lr = shared_scalar(lr)
    def get_updates(self, params, constraints, loss):
        """Build the (shared_variable, new_value) update pairs for one step.

        ``constraints`` is zipped positionally with ``params``: one callable
        per parameter, applied to the updated value before assignment.
        """
        grads = self.get_gradients(loss, params)
        # One zero-initialized accumulator per parameter, same shape.
        accumulators = [shared_zeros(p.get_value().shape) for p in params]
        self.updates = []
        for (p, g, a, c) in zip(params, grads, accumulators, constraints):
            new_a = (a + (g ** 2))  # accumulate squared gradient
            self.updates.append((a, new_a))
            new_p = (p - ((self.lr * g) / T.sqrt((new_a + self.epsilon))))
            self.updates.append((p, c(new_p)))
        return self.updates
    def get_config(self):
        """Serializable configuration: class name, current lr, epsilon."""
        return {'name': self.__class__.__name__, 'lr': float(self.lr.get_value()), 'epsilon': self.epsilon}
.operations('success')
.openapi_version('3.0')
def test_forbid_simultaneous_use_of_deprecated_and_new_options(cli, schema_url, cassette_path, snapshot_cli):
    """Passing both the deprecated --store-network-log flag and its
    replacement --cassette-path must be rejected; the CLI output is compared
    against the stored snapshot (fixtures supplied by the test framework)."""
    assert (cli.run(schema_url, f'--store-network-log={cassette_path}', f'--cassette-path={cassette_path}') == snapshot_cli)
def get_label2prevalence(df, tasks):
    """Compute the positive-label prevalence for each task column.

    A row counts as labeled for a task when its value is exactly 0 or 1;
    prevalence is (# of 1s) / (# labeled).

    Args:
        df: DataFrame with one column per task (values 1, 0, or other/NaN
            for unlabeled).
        tasks: iterable of column names to evaluate.

    Returns:
        dict mapping task name -> prevalence, NaN when a task has no
        labeled rows (previously this hit a numpy zero-division warning).
    """
    label2prevalence = {}
    for task in tasks:
        num_labeled = ((df[task] == 1) | (df[task] == 0)).sum()
        num_positive = (df[task] == 1).sum()
        # Guard the empty case explicitly instead of relying on 0/0 -> nan.
        if num_labeled:
            label2prevalence[task] = (num_positive / num_labeled)
        else:
            label2prevalence[task] = float('nan')
    return label2prevalence
def eval_success(sessions) -> list:
    """Map each session to a binary success flag: 1 when its reward is at
    least 1, else 0. Returns the flags in input order."""
    return [1 if get_reward(sess) >= 1 else 0 for sess in sessions]
def resnet34(pretrained=False, progress=True, device='cpu', **kwargs):
    """Build a ResNet-34 (BasicBlock, stage sizes [3, 4, 6, 3]) via the
    shared ``_resnet`` factory defined elsewhere in this file.

    Args:
        pretrained: load pretrained weights when True.
        progress: display a download progress bar.
        device: device to map the weights onto.
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, device, **kwargs)
def generate_result(dataset, ground_truth, prediction):
    """Print a classification report and save a normalized confusion matrix.

    Args:
        dataset: dataset name; locates configs/activity_maps/<dataset>.json
            and names the output figure.
        ground_truth: (n_samples, n_classes) array of true per-class scores
            (argmax is taken — presumably one-hot; confirm with callers).
        prediction: (n_samples, n_classes) array of predicted scores.

    Side effects: prints the sklearn classification report and writes
    results/<dataset>_confusion_matrix.png.
    """
    # `with` closes the config file (the original leaked the handle).
    with open(os.path.join('configs', 'activity_maps', (dataset + '.json'))) as f:
        activity_map = json.load(f)
    activity_names = list(activity_map.values())
    # Hoist the argmax — it was computed twice before.
    true_ids = np.argmax(ground_truth, axis=1)
    pred_ids = np.argmax(prediction, axis=1)
    print('\n[CLASSIFICATION REPORT]')
    print(classification_report(true_ids, pred_ids, labels=range(len(activity_names)), target_names=activity_names, zero_division=1))
    confm = confusion_matrix(true_ids, pred_ids, labels=range(len(activity_names)), normalize='true')
    df_cm = pd.DataFrame(confm, index=activity_names, columns=activity_names)
    plt.figure(figsize=(12, 10))
    sns.heatmap(df_cm, annot=True, fmt='.3f', cmap='YlGnBu')
    out_fig = (dataset + '_confusion_matrix.png')
    # NOTE(review): assumes ./results already exists; savefig raises otherwise.
    plt.savefig(os.path.join('results', out_fig))
    # Typo fixed: 'direcotry' -> 'directory'.
    print(f'''
Confusion matrix plot generated for {dataset}: Check "./results" directory''')
def norm2(x: Tensor, axis=(-2, -1)) -> Tensor:
    """Squared magnitude |x|^2 = real(conj(x) * x), reduced over *axis*.

    Args:
        x: possibly-complex tensor.
        axis: axes to sum over; an empty sequence returns the elementwise
            squared magnitudes without reduction. Default (-2, -1) reduces
            the last two axes. (Tuple default replaces the original mutable
            list default — the classic shared-mutable-default pitfall.)

    Returns:
        Real tensor of summed squared magnitudes.
    """
    magnitude_sq = tf.math.real(tf.math.multiply(tf.math.conj(x), x))
    if (len(axis) == 0):
        return magnitude_sq
    return tf.math.reduce_sum(magnitude_sq, axis=axis)
class MIOAlgorithm(GenerationAlgorithm[arch.MIOArchive]):
    """Many Independent Objective (MIO) test-generation algorithm.

    Samples new test cases or mutates archived ones, keeping the best
    candidates per coverage target in an archive. The search parameters
    (sampling probability Pr, tests-per-target n, mutations-per-test m) are
    linearly interpolated from their exploration values toward their
    exploitation values until a configured progress fraction, after which
    the search enters a fixed "focused" phase.
    """
    _logger = logging.getLogger(__name__)

    def __init__(self) -> None:
        super().__init__()
        # Test case currently undergoing repeated mutation, if any.
        self._solution: (tcc.TestCaseChromosome | None) = None
        self._parameters = Parameters()
        # Number of mutations applied to the current solution so far.
        self._current_mutations = 0
        # True once the focused (exploitation) phase has been entered.
        self._focused = False

    def generate_tests(self) -> tsc.TestSuiteChromosome:
        """Run the search loop until resources run out or every target is
        covered, then return the test suite built from the archive."""
        self.before_search_start()
        while (self.resources_left() and ((len(self._test_case_fitness_functions) - self._archive.num_covered_targets) != 0)):
            self.evolve()
            self._update_parameters()
            self.after_search_iteration(self.create_test_suite(self._archive.solutions))
        self.after_search_finish()
        return self.create_test_suite(self._archive.solutions)

    def _update_parameters(self):
        """Anneal Pr/n/m toward their focused values; switch permanently to
        the focused configuration once the progress threshold is crossed."""
        progress = self.progress()
        progress_until_focused = (progress / config.configuration.mio.exploitation_starts_at_percent)
        if self._focused:
            return  # parameters are fixed once focused
        n_before = self._parameters.n
        if (progress > config.configuration.mio.exploitation_starts_at_percent):
            self._logger.debug('Entering focused phase.')
            self._focused = True
            self._parameters.Pr = config.configuration.mio.focused_config.random_test_or_from_archive_probability
            self._parameters.n = config.configuration.mio.focused_config.number_of_tests_per_target
            self._parameters.m = config.configuration.mio.focused_config.number_of_mutations
        else:
            self._parameters.Pr = MIOAlgorithm._scale(config.configuration.mio.initial_config.random_test_or_from_archive_probability, config.configuration.mio.focused_config.random_test_or_from_archive_probability, progress_until_focused)
            self._parameters.n = ceil(MIOAlgorithm._scale(config.configuration.mio.initial_config.number_of_tests_per_target, config.configuration.mio.focused_config.number_of_tests_per_target, progress_until_focused))
            self._parameters.m = ceil(MIOAlgorithm._scale(config.configuration.mio.initial_config.number_of_mutations, config.configuration.mio.focused_config.number_of_mutations, progress_until_focused))
        self._parameters.is_valid()
        if (n_before != self._parameters.n):
            # Fewer tests allowed per target: drop surplus archive entries.
            self._archive.shrink_solutions(self._parameters.n)

    @staticmethod
    def _scale(initial, focused, progress_until_focused):
        """Linearly interpolate between the initial and focused value.

        Previously a plain function accessed via the class (which only works
        because Python 3 class-attribute access yields the raw function);
        marked @staticmethod to make that intent explicit.
        """
        return (initial + ((focused - initial) * progress_until_focused))

    def evolve(self) -> None:
        """One search iteration: mutate the current solution, sample a fresh
        random test, or mutate an archived one; then offer the offspring to
        the archive."""
        if ((self._solution is not None) and (self._current_mutations < self._parameters.m)):
            # Keep mutating the current solution until the budget m is spent.
            offspring = self._solution.clone()
            offspring.mutate()
            self._current_mutations += 1
        elif (randomness.next_float() < self._parameters.Pr):
            # With probability Pr, sample a brand-new random test case.
            offspring = self.chromosome_factory.get_chromosome()
            self._current_mutations = 1
        else:
            # Otherwise mutate a solution drawn from the archive, falling
            # back to a random test when the archive is empty.
            maybe_offspring = self._archive.get_solution()
            if (maybe_offspring is None):
                offspring = self.chromosome_factory.get_chromosome()
            else:
                offspring = maybe_offspring
                offspring.mutate()
            self._current_mutations = 1
        if self._archive.update([offspring]):
            # The offspring improved coverage; keep mutating it next time.
            self._solution = offspring
def rot_ply_loss(gt, pred, num_samples, img_size=256):
    """Rotation-normalized polygon chamfer loss between gt and pred contours.

    Both contours are rotated to the horizon, centered, scaled to unit max
    extent, split into four sampled segments, and compared segment-wise with
    the chamfer distance.

    Args:
        gt, pred: contour point tensors accepted by rotate_to_horizon
            (batch-first; exact shape set by that helper — confirm there).
        num_samples: points sampled per contour segment.
        img_size: image side length used for centering.

    Returns:
        Scalar tensor: summed per-segment chamfer losses / (4 * num_samples).
    """
    rotate_gt = (rotate_to_horizon(gt, img_size) - (img_size / 2))
    rotate_pred = (rotate_to_horizon(pred, img_size) - (img_size / 2))
    # Scale each contour by its per-sample max absolute coordinate.
    rotate_gt = (rotate_gt / torch.max(torch.abs(rotate_gt), dim=1).values.unsqueeze(1))
    rotate_pred = (rotate_pred / torch.max(torch.abs(rotate_pred), dim=1).values.unsqueeze(1))
    gt_parts = contour_sampling(rotate_gt, num_samples)
    pred_parts = contour_sampling(rotate_pred, num_samples)
    # BUG FIX: pytorch3d's chamfer_distance returns a (loss, loss_normals)
    # tuple. The previous code added the four tuples with `+`, which
    # CONCATENATED them, so the later `ply_loss[0]` kept only the first
    # segment's loss. Sum the loss components explicitly instead.
    ply_loss = sum(pytorch3d.loss.chamfer_distance(g, p)[0] for (g, p) in zip(gt_parts, pred_parts))
    return (torch.mean(ply_loss) / (4 * num_samples))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.