code stringlengths 101 5.91M |
|---|
def update_and_save_stats(new_stats, label, yaml_filename):
    """Insert/replace the `label` entry in a YAML stats file and rewrite it.

    Existing entries for other labels are preserved. The file is created if
    it does not exist yet.
    """
    stats = dict()
    if os.path.exists(yaml_filename):
        # Bug fix: the original leaked the read handle (open() without a
        # context manager). Also guard against an empty YAML file, for
        # which yaml.load returns None.
        with open(yaml_filename, 'r') as infile:
            stats = yaml.load(infile, Loader=yaml.FullLoader) or {}
    stats[label] = new_stats
    with open(yaml_filename, 'w') as outfile:
        outfile.write(yaml.dump(stats, default_flow_style=False))
    return
def _translate_abs_level_to_arg(level, hparams):
    """Scale `level` into the translate_const range and randomly negate it.

    Returns a 1-tuple so it can be splatted directly into an op's args.
    """
    max_translate = hparams['translate_const']
    scaled = (level / _MAX_LEVEL) * float(max_translate)
    return (_randomly_negate(scaled),)
class WeiboNERLoader(CNNERLoader):
    """Loader for the 'weibo-ner' Chinese NER dataset."""

    def __init__(self):
        super().__init__()

    def download(self) -> str:
        """Fetch the dataset if needed and return its local directory path."""
        return self._get_dataset_path(dataset_name='weibo-ner')
class MlpBlock(nn.Module):
    """Two-layer MLP with a GELU non-linearity: input_dim -> mlp_dim -> input_dim."""

    def __init__(self, input_dim, mlp_dim=512):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, mlp_dim)
        self.gelu = nn.GELU()
        self.fc2 = nn.Linear(mlp_dim, input_dim)

    def forward(self, x):
        hidden = self.gelu(self.fc1(x))
        return self.fc2(hidden)
def ask_questions_in_text(passage, bridge_entities, p_index):
    """Generate questions from `passage` and keep those that mention a bridge entity.

    Each kept entry is [question, bridge_entity, answer, p_index].
    """
    qa_pairs = qg_nlp.qg_without_answer(passage)
    valid_triples = []
    for pair in qa_pairs:
        bridge = include_bridge_entity(pair['question'], bridge_entities)
        if bridge is not None:
            valid_triples.append([pair['question'], bridge, pair['answer'], p_index])
    return valid_triples
def test_min_span_tree_plot():
    """Smoke-test minimum-spanning-tree plotting on several dimensionalities."""
    clusterer = HDBSCAN(gen_min_span_tree=True).fit(X)
    if_matplotlib(clusterer.minimum_spanning_tree_.plot)(edge_cmap='Reds')
    # Repeat on higher-dimensional synthetic data with plotting extras off.
    for n_features in (10, 40):
        H, y = make_blobs(n_samples=50, random_state=0, n_features=n_features)
        H = StandardScaler().fit_transform(H)
        clusterer = HDBSCAN(gen_min_span_tree=True).fit(H)
        if_matplotlib(clusterer.minimum_spanning_tree_.plot)(edge_cmap='Reds', vary_line_width=False, colorbar=False)
def has_indirect_component(k1, k2, k3, k4, k5, k6):
    """Check the indirect-component condition on the six indices.

    Note: k6 is accepted but unused, mirroring the original signature.
    """
    two_p = k2 + k4 + 1
    two_p1 = -(k1 + k3 - 1)
    m = k5 - two_p1 + 1
    return is_zero_or_two(two_p) and is_zero_or_two(two_p1) and m in (0, 1)
class AttackerNode2(Node):
    """Node that brings up an OpenVPN bridge on config and tears it down on exit."""

    def config(self, **params):
        super(AttackerNode2, self).config(**params)
        # Assign the interface address, start the bridge, then launch the
        # OpenVPN server in the background.
        for command in ('ifconfig attacker2-eth1 10.0.0.2',
                        'sh bridge-start2.sh',
                        'openvpn openvpn-server2.conf &'):
            self.cmd(command)

    def terminate(self):
        # Reverse of config: kill the VPN first, then remove the bridge.
        self.cmd('pkill openvpn')
        self.cmd('sh bridge-stop2.sh')
        super(AttackerNode2, self).terminate()
def _read_annotations(csv_reader, classes):
    """Parse detection annotations from CSV rows into {img_file: [box dicts]}.

    Rows with all annotation fields empty register the image with no boxes.
    Raises ValueError on malformed rows, bad coordinates, or unknown classes.
    """
    result = {}
    for line, row in enumerate(csv_reader):
        try:
            img_file, x1, y1, x2, y2, class_name = row
        except ValueError:
            raise_from(ValueError("line {}: format should be 'img_file,x1,y1,x2,y2,class_name' or 'img_file,,,,,'".format(line)), None)
        result.setdefault(img_file, [])
        # An all-empty annotation marks a negative (box-free) image.
        if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
            continue
        x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
        y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
        x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
        y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
        # Boxes must have strictly positive width and height.
        if x2 <= x1:
            raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
        if y2 <= y1:
            raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
        if class_name not in classes:
            raise ValueError("line {}: unknown class name: '{}' (classes: {})".format(line, class_name, classes))
        result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
    return result
class EngineType(enum.Enum):
    """Identifiers for the hardware execution engines."""
    TPU = 1   # compute engine (per name — confirm against hardware docs)
    GDMA = 2  # presumably a global DMA engine — confirm
    SDMA = 3  # presumably a secondary/system DMA engine — confirm
    HAU = 4   # presumably a hardware accelerator unit — confirm
    Engine_TYPE_END = 5  # sentinel: one past the last real engine
class LinearReluLinearModel(torch.nn.Module):
    """Small 5 -> 8 -> 5 fully-connected network with a ReLU in between."""

    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
class MetricTracker():
    """Tracks a validation metric across epochs.

    Remembers the best value seen so far and the number of consecutive
    epochs without improvement, which drives early stopping via `patience`.
    `should_decrease` selects whether lower (True) or higher (falsy,
    including the default None) values count as improvements.
    """

    def __init__(self, patience: Optional[int]=None, should_decrease: bool=None):
        self._best_so_far: Optional[float] = None
        self._patience = patience
        self._epochs_with_no_improvement = 0
        self._is_best_so_far = True
        self.best_epoch_metrics: Dict[(str, float)] = {}
        self._epoch_number = 0
        self.best_epoch: Optional[int] = None
        self._should_decrease = should_decrease

    def clear(self) -> None:
        """Reset all tracking state except patience and direction."""
        self._best_so_far = None
        self._epochs_with_no_improvement = 0
        self._is_best_so_far = True
        self._epoch_number = 0
        self.best_epoch = None

    def state_dict(self) -> Dict[(str, Any)]:
        """Return a serializable snapshot of the tracker's state."""
        return {
            'best_so_far': self._best_so_far,
            'patience': self._patience,
            'epochs_with_no_improvement': self._epochs_with_no_improvement,
            'is_best_so_far': self._is_best_so_far,
            'should_decrease': self._should_decrease,
            'best_epoch_metrics': self.best_epoch_metrics,
            'epoch_number': self._epoch_number,
            'best_epoch': self.best_epoch,
        }

    def load_state_dict(self, state_dict: Dict[(str, Any)]) -> None:
        """Restore tracker state from a `state_dict()` snapshot."""
        for attr, key in (('_best_so_far', 'best_so_far'),
                          ('_patience', 'patience'),
                          ('_epochs_with_no_improvement', 'epochs_with_no_improvement'),
                          ('_is_best_so_far', 'is_best_so_far'),
                          ('_should_decrease', 'should_decrease'),
                          ('best_epoch_metrics', 'best_epoch_metrics'),
                          ('_epoch_number', 'epoch_number'),
                          ('best_epoch', 'best_epoch')):
            setattr(self, attr, state_dict[key])

    def add_metric(self, metric: float) -> None:
        """Record one epoch's metric and update best/no-improvement counters."""
        if self._best_so_far is None:
            improved = True
        elif self._should_decrease:
            improved = metric < self._best_so_far
        else:
            improved = metric > self._best_so_far
        if improved:
            self.best_epoch = self._epoch_number
            self._is_best_so_far = True
            self._best_so_far = metric
            self._epochs_with_no_improvement = 0
        else:
            self._is_best_so_far = False
            self._epochs_with_no_improvement += 1
        self._epoch_number += 1

    def add_metrics(self, metrics: Iterable[float]) -> None:
        """Record several epochs' metrics in order."""
        for metric in metrics:
            self.add_metric(metric)

    def is_best_so_far(self) -> bool:
        """True iff the most recently added metric was a new best."""
        return self._is_best_so_far

    def should_stop_early(self) -> bool:
        """True when patience is set and has been exhausted."""
        if self._patience is None:
            return False
        return self._epochs_with_no_improvement >= self._patience
def tuple_to_short_str(the_tuple: tuple) -> str:
    """Return the tuple's entries as a comma-separated string (no parentheses).

    Uses str.join instead of the original repeated `+=` concatenation
    (linear instead of quadratic). An empty tuple yields ''.
    """
    return ','.join(str(entry) for entry in the_tuple)
class SciKernelInitializer(k.initializers.VarianceScaling):
    """Variance-scaling kernel initializer whose mode depends on layer index.

    The first layer (lay == 0) uses fan_in scaling; deeper layers use
    fan_avg. Scale is fixed at 1.0 with a truncated-normal distribution.
    """

    def __init__(self, lay=0, seed=None):
        self.lay = lay
        self.w0 = 1.0
        mode = 'fan_in' if lay == 0 else 'fan_avg'
        super(SciKernelInitializer, self).__init__(scale=1.0, mode=mode, distribution='truncated_normal', seed=seed)

    def get_config(self):
        """Return the serialization config including this class's extras.

        Bug fix: the original also serialized `self.bias`, an attribute
        that is never assigned anywhere in the class, so every call raised
        AttributeError. The 'bias' entry is dropped.
        """
        base_config = super().get_config()
        config = {'w0': self.w0, 'lay': self.lay}
        return dict(list(base_config.items()) + list(config.items()))
def parse_math_answer(setting_name, raw_string):
    """Extract the final short answer from a raw model response.

    For few-shot settings the prompt prefix is stripped (and, for CoT,
    only the last line kept) and the result returned as-is. Otherwise the
    answer is pulled from a \\boxed{...} expression, a $...$ span, or a
    trailing `= value` / bare number, in that order of preference.

    NOTE(review): indentation was reconstructed from a flattened source;
    the early return is assumed to belong to the few-shot branch — confirm.
    """
    if (setting_name == 'few-shot-CoT'):
        raw_string = extract_last_line(raw_string)
    if ((setting_name == 'few-shot-CoT') or (setting_name == 'few-shot')):
        raw_string = remove_few_shot_prefix(raw_string)
        return raw_string

    def remove_boxed(s):
        # Strip a leading '\boxed{' and trailing '}'; on any mismatch
        # (including non-string input) fall back to None.
        left = '\\boxed{'
        try:
            assert (s[:len(left)] == left)
            assert (s[(- 1)] == '}')
            answer = s[len(left):(- 1)]
            if ('=' in answer):
                # Keep only the right-hand side of an equation.
                answer = answer.split('=')[(- 1)].lstrip(' ')
            return answer
        except:
            return None

    def last_boxed_only_string(string):
        # Return the last '\boxed{...}' (or '\fbox{...}') substring,
        # command included, using brace matching to find its end.
        idx = string.rfind('\\boxed')
        if (idx < 0):
            idx = string.rfind('\\fbox')
            if (idx < 0):
                return None
        i = idx
        right_brace_idx = None
        num_left_braces_open = 0
        while (i < len(string)):
            if (string[i] == '{'):
                num_left_braces_open += 1
            if (string[i] == '}'):
                num_left_braces_open -= 1
                # Depth back to zero: this brace closes the \boxed argument.
                if (num_left_braces_open == 0):
                    right_brace_idx = i
                    break
            i += 1
        if (right_brace_idx == None):
            retval = None
        else:
            retval = string[idx:(right_brace_idx + 1)]
        return retval

    def get_answer_with_dollar_sign(s):
        # Take the last $...$ span; keep only the RHS if it contains '='.
        first_pattern = '\\$(.*)\\$'
        last_match = None
        matches = re.findall(first_pattern, s)
        if matches:
            last_match = matches[(- 1)]
            if ('=' in last_match):
                last_match = last_match.split('=')[(- 1)].lstrip(' ')
        return last_match

    def get_answer_without_dollar_sign(s):
        # Prefer the RHS of the last '='; otherwise the last bare number
        # (optionally $-prefixed) in the string.
        last_match = None
        if ('=' in s):
            last_match = s.split('=')[(- 1)].lstrip(' ').rstrip('.')
            # Truncate at a literal '\n' (LaTeX line break), if present.
            if ('\\n' in last_match):
                last_match = last_match.split('\\n')[0]
        else:
            pattern = '(?:\\$)?\\d+(?:\\.\\d+)?(?![\\w\\d])'
            matches = re.findall(pattern, s)
            if matches:
                last_match = matches[(- 1)]
        return last_match

    raw_string = remove_few_shot_prefix(raw_string)
    if ('\\boxed' in raw_string):
        answer = remove_boxed(last_boxed_only_string(raw_string))
    else:
        answer = get_answer_with_dollar_sign(raw_string)
        if (not answer):
            answer = get_answer_without_dollar_sign(raw_string)
    return answer
class ConstraintPage():
    """Renders the constraint summary page (foreign-key + check constraints)."""

    def __init__(self, template_object: PageTemplate) -> None:
        self.template_object = template_object

    def page_writer(self, constraints: List[ForeignKeyConstraint], tables: List[Table], new_file: str):
        """Build the constraint page's scope and write it via the template.

        Returns whatever the template writer returns.
        """
        page_data = PageData('constraint.html', 'constraint.js')
        page_data.add_scope('constraints', constraints)
        page_data.add_scope('constraints_num', len(constraints))
        page_data.add_scope('check_constraints', self.collect_check_constraints(tables))
        page_data.set_depth(0)
        pagination_configs = {'fk_table': {'paging': 'true', 'pageLength': 20, 'lengthChange': 'false'}, 'check_table': {'paging': 'true', 'pageLength': 10, 'lengthChange': 'false'}}
        return self.template_object.write_data(page_data, new_file, 'constraint.js', pagination_configs)

    def collect_check_constraints(self, tables: List[Table]):
        """Collect per-table check constraints as TemplateConstraint objects.

        Bug fixes vs. the original: `self` was missing from the signature
        (so the call in page_writer bound `tables` to `self`), and the
        built `results` list was never returned.
        """
        all_constraints = []
        results = []
        for table in tables:
            if len(table.check_constraints) > 0:
                all_constraints.append(table.check_constraints)
        for x in all_constraints:
            results.append(TemplateConstraint(x, x.keys(), x.values()))
        return results
def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
    """Write optical flow to a file.

    If `quantize` is False the flow is saved in .flo format ('PIEH' magic,
    width, height, float32 data). Otherwise it is quantized into two maps
    that are concatenated along `concat_axis` and saved as an image;
    extra args are forwarded to quantize_flow.
    """
    if not quantize:
        with open(filename, 'wb') as f:
            f.write('PIEH'.encode('utf-8'))
            # .flo stores width then height.
            np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
            flow = flow.astype(np.float32)
            flow.tofile(f)
            f.flush()
    else:
        assert concat_axis in [0, 1]
        dx, dy = quantize_flow(flow, *args, **kwargs)
        dxdy = np.concatenate((dx, dy), axis=concat_axis)
        # Bug fixes: create the parent directory (the original created a
        # directory named after the file itself, making the write fail),
        # and pass cv2.imwrite its arguments in (filename, image) order.
        os.makedirs(os.path.dirname(os.path.abspath(filename)), exist_ok=True)
        cv2.imwrite(filename, dxdy)
def test_merge_min():
    """_merge_min must keep the element-wise minimum in its first argument."""
    target = {0: 0.5, 1: 0.2}
    other = {0: 0.3, 1: 0.6}
    ExecutionTrace._merge_min(target, other)
    assert target == {0: 0.3, 1: 0.2}
class Block(nn.Module):
    """Attention + MLP block with residual connections and optional DropPath."""

    def __init__(self, dim, key_dim, num_heads, mlp_ratio=4.0, attn_ratio=2.0, drop=0.0, drop_path=0.0, act_layer=nn.ReLU, norm_cfg=dict(type='BN2d', requires_grad=True)):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.attn = Attention(dim, key_dim=key_dim, num_heads=num_heads, attn_ratio=attn_ratio, activation=act_layer, norm_cfg=norm_cfg)
        # Stochastic depth only when a positive rate is requested.
        if drop_path > 0.0:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop, norm_cfg=norm_cfg)

    def forward(self, x1):
        x1 = x1 + self.drop_path(self.attn(x1))
        return x1 + self.drop_path(self.mlp(x1))
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Validate and reformat one IDNO value, returning a one-element list.

    Nulls map to [nan]; invalid values raise, pass through, or map to nan
    depending on `errors` ('raise' / 'ignore' / anything else).
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_md_idno(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    result: Any = []
    if output_format in {'compact', 'standard'}:
        result = [idno.compact(val)] + result
    return result
class DataSetIter(BatchIter):
    """BatchIter over a DataSet, yielding (batch_x, batch_y) pairs."""

    def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False, num_workers=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, batch_sampler=None):
        assert isinstance(dataset, DataSet)
        dataset = DataSetGetter(dataset, as_numpy)
        collate_fn = dataset.collate_fn
        # A custom batch_sampler takes full control of batching, so the
        # mutually exclusive arguments are forced back to their defaults.
        if batch_sampler is not None:
            batch_size = 1
            sampler = None
            drop_last = False
        super().__init__(dataset=dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, collate_fn=collate_fn, batch_sampler=batch_sampler)

    def __iter__(self):
        self.init_iter()
        for indices, batch_x, batch_y in self.dataiter:
            # Expose the indices of the current batch to callers.
            self.cur_batch_indices = indices
            yield batch_x, batch_y
def test_combine_outfile(tmp_path, script_runner):
    """Combining a workspace with its renamed copy yields both channels."""
    temp_1 = tmp_path.joinpath('parsed_output.json')
    temp_2 = tmp_path.joinpath('renamed_output.json')
    # Step 1: import the XML workspace.
    command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1} --hide-progress'
    ret = script_runner.run(shlex.split(command))
    # Step 2: rename channels/measurements so the copies do not collide.
    rename_channels = {'channel1': 'channel2'}
    rename_measurements = {'ConstExample': 'OtherConstExample', 'LogNormExample': 'OtherLogNormExample', 'GaussExample': 'OtherGaussExample', 'GammaExample': 'OtherGammaExample'}
    _opts_channels = ''.join((' -c ' + ' '.join(item)) for item in rename_channels.items())
    _opts_measurements = ''.join((' --measurement ' + ' '.join(item)) for item in rename_measurements.items())
    command = f'pyhf rename {temp_1} {_opts_channels:s} {_opts_measurements:s} --output-file {temp_2}'
    ret = script_runner.run(shlex.split(command))
    # Step 3: combine the original and renamed workspaces into one file.
    tempout = tmp_path.joinpath('combined_output.json')
    command = f'pyhf combine {temp_1} {temp_2} --output-file {tempout}'
    ret = script_runner.run(shlex.split(command))
    assert ret.success
    combined_ws = pyhf.Workspace(json.loads(tempout.read_text()))
    assert combined_ws.channels == ['channel1', 'channel2']
    assert len(combined_ws.measurement_names) == 8
def register_types_ns3_TracedValueCallback(module):
    """Register ns3::TracedValueCallback typedef aliases with the type handlers.

    For each callback signature three aliases are registered: the
    function-pointer type itself, its pointer ('**'), and its reference
    ('*&'), in the same order as the original generated code.
    """
    root_module = module.get_root()
    callback_types = [
        (u'void ( * ) ( double, double ) *', u'ns3::TracedValueCallback::Double'),
        (u'void ( * ) ( ns3::SequenceNumber32, ns3::SequenceNumber32 ) *', u'ns3::TracedValueCallback::SequenceNumber32'),
        (u'void ( * ) ( int8_t, int8_t ) *', u'ns3::TracedValueCallback::Int8'),
        (u'void ( * ) ( uint8_t, uint8_t ) *', u'ns3::TracedValueCallback::Uint8'),
        (u'void ( * ) ( int32_t, int32_t ) *', u'ns3::TracedValueCallback::Int32'),
        (u'void ( * ) ( bool, bool ) *', u'ns3::TracedValueCallback::Bool'),
        (u'void ( * ) ( uint16_t, uint16_t ) *', u'ns3::TracedValueCallback::Uint16'),
        (u'void ( * ) ( uint32_t, uint32_t ) *', u'ns3::TracedValueCallback::Uint32'),
        (u'void ( * ) ( int16_t, int16_t ) *', u'ns3::TracedValueCallback::Int16'),
        (u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time'),
        (u'void ( * ) ( ) *', u'ns3::TracedValueCallback::Void'),
    ]
    for ptr_type, alias in callback_types:
        typehandlers.add_type_alias(ptr_type, alias)
        typehandlers.add_type_alias(ptr_type + u'*', alias + u'*')
        typehandlers.add_type_alias(ptr_type + u'&', alias + u'&')
def main():
    """Compare two distributed-benchmark JSON result files side by side.

    Prints the metadata keys of both files, then for each matching
    (model, batch size) pair the p75/p95 seconds-per-iteration,
    examples-per-second, and relative difference per GPU count.
    """
    parser = argparse.ArgumentParser(description='PyTorch distributed benchmark diff')
    parser.add_argument('file', nargs=2)
    args = parser.parse_args()
    if len(args.file) != 2:
        # Bug fix: the original raised a plain string, which is itself a
        # TypeError in Python 3; raise a real exception instead.
        raise ValueError('Must specify 2 files to diff')
    ja = load(args.file[0])
    jb = load(args.file[1])
    # Show all metadata keys except the measurements themselves.
    keys = (set(ja.keys()) | set(jb.keys())) - set(['benchmark_results'])
    print('{:20s} {:>20s} {:>20s}'.format('', 'baseline', 'test'))
    print('{:20s} {:>20s} {:>20s}'.format('', ('-' * 20), ('-' * 20)))
    for key in sorted(keys):
        va = str(ja.get(key, '-'))
        vb = str(jb.get(key, '-'))
        print('{:20s} {:>20s} vs {:>20s}'.format(key + ':', va, vb))
    print('')
    ba = ja['benchmark_results']
    bb = jb['benchmark_results']
    for ra, rb in zip(ba, bb):
        # Only compare runs of the same model with the same batch size.
        if ra['model'] != rb['model']:
            continue
        if ra['batch_size'] != rb['batch_size']:
            continue
        model = ra['model']
        batch_size = int(ra['batch_size'])
        name = '{} with batch size {}'.format(model, batch_size)
        print('Benchmark: {}'.format(name))
        print('')
        print('{:>10s}'.format(''), end='')
        for _ in [75, 95]:
            print('{:>16s}{:>10s}{:>10s}'.format('sec/iter', 'ex/sec', 'diff'), end='')
        print('')
        for i, (xa, xb) in enumerate(zip(ra['result'], rb['result'])):
            # Skip the warmup entry and any mismatched world sizes.
            if i == 0:
                continue
            if len(xa['ranks']) != len(xb['ranks']):
                continue
            ngpus = len(xa['ranks'])
            ma = sorted(xa['measurements'])
            mb = sorted(xb['measurements'])
            print('{:>4d} GPUs:'.format(ngpus), end='')
            for p in [75, 95]:
                va = np.percentile(ma, p)
                vb = np.percentile(mb, p)
                # Negative delta means the test run is slower than baseline.
                delta = -100 * ((vb - va) / va)
                print(' p{:02d}: {:8.3f}s {:7d}/s {:+8.1f}%'.format(p, vb, int(batch_size / vb), delta), end='')
            print('')
        print('')
class GradleCommand(BuildCommand):
    """BuildCommand implementation for Gradle projects."""

    @staticmethod
    def name() -> str:
        # Bug fix: the original declared this as a plain method without
        # `self`, so calling it on an instance raised TypeError.
        # @staticmethod works for both class- and instance-level calls.
        return 'gradle'

    def _prepare_args(self, args: List[str]) -> List[str]:
        # Always pass --debug so diagnostic lines appear in the output.
        return args + ['--debug']

    def _get_errors(self, output: str, error: str) -> str:
        # NOTE(review): '[ERROR]' is Maven's marker; confirm that this
        # project's gradle output actually contains it.
        lines = output.splitlines()
        return '\n'.join([line for line in lines if ('[ERROR]' in line)])

    def _get_dependencies(self, shell_output: str, project_dir: str, logger: Logger) -> Set[str]:
        """Inject the classpath helper script and parse its output."""
        buildfile_dir = self._parse_buildfile_dir(self.args)
        shutil.copy(os.path.join(os.path.dirname(__file__), 'classpath.gradle'), os.path.join(project_dir, buildfile_dir))
        command = "gradle :printClasspath -b '{}'".format(os.path.join(buildfile_dir, 'classpath.gradle'))
        output = Shell.exec(command, cwd=project_dir, logger=logger)
        return self._parse_classpath(output)

    def _parse_classpath(self, shell_output: str) -> Set[str]:
        # Dependencies are listed between the ':printClasspath' task header
        # and the first blank line.
        lines = shell_output.splitlines()
        first_dependency_idx = next(i for i, line in enumerate(lines) if line == ':printClasspath') + 1
        first_empty_line_idx = next(i for i, line in enumerate(lines) if not line)
        return set(lines[first_dependency_idx:first_empty_line_idx])

    def _parse_buildfile_dir(self, args):
        # The build directory may be given as '-p DIR' or '--project-dir DIR'.
        buildfile_dir = ''
        if '-p' in args:
            buildfile_dir = args[args.index('-p') + 1]
        elif '--project-dir' in args:
            buildfile_dir = args[args.index('--project-dir') + 1]
        return buildfile_dir
def mmd(x, y):
(n, dim) = x.shape
xx = (x ** 2).sum(1, keepdim=True)
yy = (y ** 2).sum(1, keepdim=True)
outer_xx = torch.mm(x, x.t())
outer_yy = torch.mm(y, y.t())
outer_xy = torch.mm(x, y.t())
diff_xx = ((xx + xx.t()) - (2 * outer_xx))
diff_yy = ((yy + yy.t()) - (2 * outer_yy))
diff_xy = ((xx + yy.t()) - (2 * outer_xy))
C = (2.0 * dim)
k_xx = (C / (C + diff_xx))
k_yy = (C / (C + diff_yy))
k_xy = (C / (C + diff_xy))
mean_xx = ((k_xx.sum() - k_xx.diag().sum()) / (n * (n - 1)))
mean_yy = ((k_yy.sum() - k_yy.diag().sum()) / (n * (n - 1)))
mean_xy = (k_xy.sum() / (n * n))
return ((mean_xx + mean_yy) - (2 * mean_xy)) |
def main():
    """CLI entry point: generate a testbench for an input verilog file."""
    app_path = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(description='BLASYS -- Approximate Logic Synthesis Using Boolean Matrix Factorization')
    parser.add_argument('-i', help='Input verilog file', required=True, dest='input')
    parser.add_argument('-o', help='Output testbench file', required=True, dest='output')
    parser.add_argument('-n', help='Number of test vectors', type=int, default=10000, dest='number')
    args = parser.parse_args()
    print_banner()
    print('Start creating testbench ...')
    # The yosys binary location comes from the bundled config file.
    config_path = os.path.join(app_path, 'config', 'params.yml')
    with open(config_path, 'r') as fh:
        config = yaml.safe_load(fh)
    create_testbench(args.input, args.output, args.number, config['yosys'])
def __generate_fingerprint(subproc_args):
    """Run `tor --list-fingerprint` for one relay and return the CompletedProcess.

    `subproc_args` is a (torexe, datadir, nickname, torrc) tuple.
    """
    torexe, datadir, nickname, torrc = subproc_args
    listfp_cmd = '{} --list-fingerprint --DataDirectory {} --Nickname {} -f {}'.format(torexe, datadir, nickname, torrc)
    # stderr is folded into stdout so the caller sees one combined stream.
    return subprocess.run(shlex.split(listfp_cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def _serialize_to_tensor(data, group):
    """Pickle `data` into a ByteTensor on the backend-appropriate device.

    Horovod mode forces the nccl backend; otherwise the backend is taken
    from the process group. gloo gathers on CPU, nccl on CUDA.
    """
    global _USE_HVD
    backend = 'nccl' if _USE_HVD else dist.get_backend(group)
    assert backend in ['gloo', 'nccl']
    device = torch.device('cpu' if backend == 'gloo' else 'cuda')
    buffer = pickle.dumps(data)
    # Warn when a single rank is about to move more than 1 GB.
    if len(buffer) > 1024 ** 3:
        logger = logging.getLogger(__name__)
        logger.warning('Rank {} trying to all-gather {:.2f} GB of data on device {}'.format(get_rank(), len(buffer) / (1024 ** 3), device))
    storage = torch.ByteStorage.from_buffer(buffer)
    return torch.ByteTensor(storage).to(device=device)
def test_make_splits_order():
    """Splits must follow the user-supplied (descending) ordering."""
    order = torch.arange(100, 0, -1, dtype=torch.int)
    train, val, test = make_splits(100, 0.7, 0.2, 0.1, 1234, order=order)
    # 70/20/10 split taken sequentially from the provided order.
    assert (train == torch.arange(100, 30, -1, dtype=torch.int)).all()
    assert (val == torch.arange(30, 10, -1, dtype=torch.int)).all()
    assert (test == torch.arange(10, 0, -1, dtype=torch.int)).all()
def time_op(device, func, *inputs: tuple, **kwargs):
    """Execute func(*inputs, **kwargs) and measure it.

    Returns (exec_time_ms, output, cuda_mem_bytes). On CUDA devices,
    timing uses CUDA events and memory is the peak allocation above the
    pre-call baseline; elsewhere wall-clock time is used and memory is 0.
    """
    cuda_mem = 0
    if device.type == 'cuda':
        torch.cuda.reset_max_memory_allocated(device=device)
        base_mem = torch.cuda.memory_allocated(device=device)
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        # Synchronize before and after so the events bracket only `func`.
        torch.cuda.synchronize(device=device)
        start.record()
        out = func(*inputs, **kwargs)
        end.record()
        torch.cuda.synchronize(device=device)
        exec_time = start.elapsed_time(end)
        peak_usage = torch.cuda.max_memory_allocated(device=device)
        cuda_mem = peak_usage - base_mem
    else:
        tic = time.time()
        out = func(*inputs, **kwargs)
        exec_time = 1000 * (time.time() - tic)
    return (exec_time, out, cuda_mem)
# Bug fix: the decorator line was truncated to '.parametrize(...)', which is
# a syntax error; restored the full pytest mark.
@pytest.mark.parametrize('shuffle', [True])
def test_simple_data_source_duplicated_order(test_data_csv_png_20, shuffle):
    """Shuffled epochs should produce pairwise-distinct visit orders."""
    src_data = tuple(zip(range(100), range(100)))

    def test_load_func(position):
        return src_data[position]

    epoch_num = 10
    size = len(src_data)
    ds = SimpleDataSource(test_load_func, size, shuffle=shuffle)
    orders = []
    for k in range(epoch_num):
        order = []
        for i in range(ds.size):
            data = ds.next()
            order.append(data)
        orders.append(order)
        ds.reset()
        ds.apply_order()
    # Every pair of epochs must have visited the data in a different order.
    for i in range(len(orders) - 1):
        for j in range(i + 1, len(orders)):
            assert orders[i] != orders[j]
def ensure_dir(path):
    """Create `path` (including parents) if it does not already exist."""
    try:
        os.makedirs(path)
    except OSError as err:
        # Only swallow "already exists"; re-raise any other failure.
        if err.errno != errno.EEXIST:
            raise
def env_runner(client: RayInferenceClient, servers: Dict[(str, RayInferenceWorkerSet)], rollout_config: Dict[(str, Any)], server_runtime_config: Dict[(str, Any)], dwriter_info_dict: Dict[(str, Tuple[(str, Queue)])]=None) -> Tuple[(List[Dict[(str, Any)]], Dict[(str, float)])]:
    """Drive a batched environment rollout against policy servers.

    Resets the client's environment, then repeatedly groups per-agent
    dataframes by runtime id, queries the matching server for actions
    (remotely via ray.get when the servers are actor handles), and steps
    the environment until termination. When `dwriter_info_dict` is given,
    full episodes are recorded and pushed to the writers' queues.

    Returns a (rollout_info, performance) tuple where performance holds
    timer statistics plus 'FPS' and 'total_timesteps'.

    NOTE(review): indentation was reconstructed from a flattened source —
    confirm the block nesting (especially which statements sit inside the
    timer context managers) against the original.
    """
    evaluate_on = (server_runtime_config['behavior_mode'] == BehaviorMode.EXPLOITATION)
    # Servers may be local objects or Ray actor handles.
    remote_actor = isinstance(list(servers.values())[0], ActorHandle)
    try:
        # Episode recording is enabled only when writers were supplied.
        if (dwriter_info_dict is not None):
            episodes = NewEpisodeList(num=client.env.num_envs, agents=client.env.possible_agents)
        else:
            episodes = None
        with client.timer.timeit('environment_reset'):
            env_rets = client.env.reset(fragment_length=rollout_config['fragment_length'], max_step=rollout_config['max_step'])
            (env_dones, processed_env_ret, dataframes) = process_env_rets(env_rets=env_rets, preprocessor=server_runtime_config['preprocessor'], preset_meta_data={'evaluate': evaluate_on})
            if (episodes is not None):
                episodes.record(processed_env_ret, agent_first=False, is_episode_done=env_dones)
        start = time.time()
        cnt = 0
        while (not client.env.is_terminated()):
            # Group incoming dataframes by the runtime (training) id that
            # serves each agent.
            grouped_data_frames: Dict[(str, List[DataFrame])] = defaultdict((lambda : []))
            for (agent, dataframe) in dataframes.items():
                runtime_id = client.training_agent_mapping(agent)
                grouped_data_frames[runtime_id].append(dataframe)
            with client.timer.time_avg('policy_step'):
                if remote_actor:
                    policy_outputs: Dict[(str, List[DataFrame])] = {rid: ray.get(server.compute_action.remote(grouped_data_frames[rid], runtime_config=server_runtime_config)) for (rid, server) in servers.items()}
                else:
                    policy_outputs: Dict[(str, List[DataFrame])] = {rid: server.compute_action(grouped_data_frames[rid], runtime_config=server_runtime_config) for (rid, server) in servers.items()}
            with client.timer.time_avg('process_policy_output'):
                (env_actions, processed_policy_outputs) = process_policy_outputs(policy_outputs, client.env)
                if (episodes is not None):
                    episodes.record(processed_policy_outputs, agent_first=True, is_episode_done=env_dones)
            with client.timer.time_avg('environment_step'):
                env_rets = client.env.step(env_actions)
                (env_dones, processed_env_ret, dataframes) = process_env_rets(env_rets=env_rets, preprocessor=server_runtime_config['preprocessor'], preset_meta_data={'evaluate': evaluate_on})
                if (episodes is not None):
                    episodes.record(processed_env_ret, agent_first=False, is_episode_done=env_dones)
            cnt += 1
        # Flush recorded episodes to each runtime's writer queue.
        if (dwriter_info_dict is not None):
            episodes = episodes.to_numpy()
            for (rid, writer_info) in dwriter_info_dict.items():
                agents = client.agent_group[rid]
                batches = []
                for episode in episodes:
                    agent_buffer = [episode[aid] for aid in agents]
                    batches.append(agent_buffer)
                writer_info[(- 1)].put_nowait_batch(batches)
        end = time.time()
        rollout_info = client.env.collect_info()
    except Exception as e:
        traceback.print_exc()
        raise e
    performance = client.timer.todict()
    performance['FPS'] = (client.env.batched_step_cnt / (end - start))
    eval_results = rollout_info
    performance['total_timesteps'] = client.env.batched_step_cnt
    return (eval_results, performance)
def register_Ns3CsmaNetDevice_methods(root_module, cls):
    """Register ns3::CsmaNetDevice methods on the pybindgen class wrapper.

    Mechanically generated binding declarations: CSMA-specific setup,
    the ns3::NetDevice virtual interface, and protected helpers.
    """
    # CSMA-specific configuration and channel attachment.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('SetInterframeGap', 'void', [param('ns3::Time', 't')])
    cls.add_method('SetBackoffParams', 'void', [param('ns3::Time', 'slotTime'), param('uint32_t', 'minSlots'), param('uint32_t', 'maxSlots'), param('uint32_t', 'maxRetries'), param('uint32_t', 'ceiling')])
    cls.add_method('Attach', 'bool', [param('ns3::Ptr< ns3::CsmaChannel >', 'ch')])
    cls.add_method('SetQueue', 'void', [param('ns3::Ptr< ns3::Queue< ns3::Packet > >', 'queue')])
    cls.add_method('GetQueue', 'ns3::Ptr< ns3::Queue< ns3::Packet > >', [], is_const=True)
    cls.add_method('SetReceiveErrorModel', 'void', [param('ns3::Ptr< ns3::ErrorModel >', 'em')])
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ptr< ns3::CsmaNetDevice >', 'sender')])
    cls.add_method('IsSendEnabled', 'bool', [])
    cls.add_method('SetSendEnable', 'void', [param('bool', 'enable')])
    cls.add_method('IsReceiveEnabled', 'bool', [])
    cls.add_method('SetReceiveEnable', 'void', [param('bool', 'enable')])
    cls.add_method('SetEncapsulationMode', 'void', [param('ns3::CsmaNetDevice::EncapsulationMode', 'mode')])
    cls.add_method('GetEncapsulationMode', 'ns3::CsmaNetDevice::EncapsulationMode', [])
    # Virtual methods inherited from the ns3::NetDevice interface.
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    # Protected helpers.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('AddHeader', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Mac48Address', 'source'), param('ns3::Mac48Address', 'dest'), param('uint16_t', 'protocolNumber')], visibility='protected')
    return
def get_item():
    """Load the train and test interaction files and return unique item ids."""
    # Column 0/1 are string ids, column 2 is a float rating/score.
    dtypes = {0: str, 1: str, 2: np.float32}
    train = pd.read_csv(PATH_TO_TRAIN, sep='\t', dtype=dtypes)
    test = pd.read_csv(PATH_TO_TEST, sep='\t', dtype=dtypes)
    return pd.concat([train, test]).ItemId.unique()
def cal_acc(true_label_list, pred_label_list):
    """Compute element-wise accuracy between two aligned label lists.

    Args:
        true_label_list: ground-truth labels.
        pred_label_list: predicted labels, same length as true_label_list.

    Returns:
        Fraction of positions where the two labels are equal, as a float.
        Returns 0.0 for empty input (the original raised ZeroDivisionError).
    """
    total = len(true_label_list)
    if total == 0:
        # Guard the empty case instead of dividing by zero.
        return 0.0
    correct = sum(1 for true, pred in zip(true_label_list, pred_label_list) if true == pred)
    return correct / total
class VFE_Layer(tf.keras.layers.Layer):
    """Voxel Feature Encoding layer (VoxelNet-style).

    Each point's features pass through a Dense+BatchNorm "FCN" producing
    c_out // 2 channels; those are concatenated with the element-wise max
    over the points axis (tiled back per point), giving c_out channels.
    The boolean mask zeroes the features of padding points.
    """

    def __init__(self, c_out):
        super(VFE_Layer, self).__init__()
        # Half the output channels are point-wise, half come from the
        # tiled voxel-wise max of those same features.
        self.units = c_out // 2
        self.fcn = tf.keras.layers.Dense(self.units, activation='relu')
        self.bn = tf.keras.layers.BatchNormalization(trainable=True)

    def call(self, input, mask, training=False):
        point_feats = self.bn(self.fcn(input), training=training)
        # Aggregate over the points axis, then broadcast back to each point.
        pooled = tf.reduce_max(point_feats, axis=2, keepdims=True)
        tiled_pooled = tf.tile(pooled, [1, 1, tf.shape(point_feats)[2], 1])
        combined = tf.concat([point_feats, tiled_pooled], axis=-1)
        # Expand the mask over the full channel dimension and zero padding.
        channel_mask = tf.tile(mask, [1, 1, 1, 2 * self.units])
        return tf.multiply(combined, tf.cast(channel_mask, tf.float32))
# NOTE(review): these two bare calls sit directly above the TestReporter
# class and read like registration decorators whose leading '@' was lost
# during extraction (e.g. `@registry.register_test_reporter('file')`) —
# confirm against the upstream source before relying on them.
_test_reporter('file')
_test_reporter('default')
class TestReporter(Dataset):
    """Accumulates per-batch model predictions for one or more datamodules
    and flushes them to timestamped CSV/JSON files under
    <save_dir>/reports (or the explicit report_dir env override).

    NOTE(review): ``current_dataset`` reads like an ``@property`` accessor
    but carries no decorator in this copy — confirm against upstream before
    relying on attribute-style access (``flush_report`` etc. use it as an
    attribute).
    """
    class Config():
        # Report keys that are gathered/reshaped across workers before dumping.
        candidate_fields: List[str] = field(default_factory=(lambda : DEFAULT_CANDIDATE_FIELDS))
        # Output format for prediction files: 'json' (default) or 'csv'.
        predict_file_format: str = 'json'
    def __init__(self, datamodules: List[pl.LightningDataModule], config: Config=None, dataset_type: str='train'):
        """Set up report bookkeeping and resolve the output folder."""
        self.test_reporter_config = OmegaConf.merge(OmegaConf.structured(self.Config), config)
        self.datamodules = datamodules
        self.dataset_type = dataset_type
        self.config = registry.get('config')
        self.report = []
        self.timer = Timer()
        self.training_config = self.config.training
        self.num_workers = self.training_config.num_workers
        self.batch_size = self.training_config.batch_size
        self.report_folder_arg = get_mmf_env(key='report_dir')
        self.experiment_name = self.training_config.experiment_name
        # Starts at -1 so the first next_dataset() call selects index 0.
        self.current_datamodule_idx = (- 1)
        self.dataset_names = list(self.datamodules.keys())
        self.current_datamodule = self.datamodules[self.dataset_names[self.current_datamodule_idx]]
        self.current_dataloader = None
        self.save_dir = get_mmf_env(key='save_dir')
        # Derive <save_dir>/<ckpt name + overrides>/reports ...
        self.report_folder = ckpt_name_from_core_args(self.config)
        self.report_folder += foldername_from_config_override(self.config)
        self.report_folder = os.path.join(self.save_dir, self.report_folder)
        self.report_folder = os.path.join(self.report_folder, 'reports')
        # ... unless an explicit report_dir env var overrides it entirely.
        if self.report_folder_arg:
            self.report_folder = self.report_folder_arg
        self.candidate_fields = self.test_reporter_config.candidate_fields
        PathManager.mkdirs(self.report_folder)
        log_class_usage('TestReporter', self.__class__)
    def current_dataset(self):
        """Dataset backing the active dataloader (call get_dataloader first)."""
        self._check_current_dataloader()
        return self.current_dataloader.dataset
    def next_dataset(self, flush_report=True):
        """Advance to the next datamodule; return False when all are done.

        Flushes (or discards, if flush_report is False) the report
        accumulated for the previous datamodule.
        """
        if (self.current_datamodule_idx >= 0):
            if flush_report:
                self.flush_report()
            else:
                self.report = []
        self.current_datamodule_idx += 1
        if (self.current_datamodule_idx == len(self.datamodules)):
            return False
        else:
            self.current_datamodule = self.datamodules[self.dataset_names[self.current_datamodule_idx]]
            logger.info(f'Predicting for {self.dataset_names[self.current_datamodule_idx]}')
            return True
    def flush_report(self):
        """Write the accumulated predictions to disk (main process only)."""
        if (not is_main()):
            # Non-main ranks just drop their copy of the report.
            self.report = []
            return
        name = self.current_datamodule.dataset_name
        time_format = '%Y-%m-%dT%H:%M:%S'
        time = self.timer.get_time_hhmmss(None, format=time_format)
        # Filename: <dataset>_<experiment?>_<split>_<timestamp>.
        filename = (name + '_')
        if (len(self.experiment_name) > 0):
            filename += (self.experiment_name + '_')
        filename += (self.dataset_type + '_')
        filename += time
        # CSV wins if either the evaluation config or the reporter asks for it.
        use_csv_writer = ((self.config.evaluation.predict_file_format == 'csv') or (self.test_reporter_config.predict_file_format == 'csv'))
        if use_csv_writer:
            filepath = os.path.join(self.report_folder, (filename + '.csv'))
            self.csv_dump(filepath)
        else:
            filepath = os.path.join(self.report_folder, (filename + '.json'))
            self.json_dump(filepath)
        logger.info(f'Wrote predictions for {name} to {os.path.abspath(filepath)}')
        self.report = []
    def postprocess_dataset_report(self):
        """Let the dataset post-process the report before it is written."""
        self._check_current_dataloader()
        if hasattr(self.current_dataset, 'on_prediction_end'):
            self.report = self.current_dataset.on_prediction_end(self.report)
    def csv_dump(self, filepath):
        """Dump the report as CSV using the first row's keys as the header."""
        with PathManager.open(filepath, 'w') as f:
            title = self.report[0].keys()
            cw = csv.DictWriter(f, title, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            cw.writeheader()
            cw.writerows(self.report)
    def json_dump(self, filepath):
        """Dump the report as one JSON array."""
        with PathManager.open(filepath, 'w') as f:
            json.dump(self.report, f)
    def get_dataloader(self):
        """Fetch the '<dataset_type>_dataloader' from the active datamodule."""
        self.current_dataloader = getattr(self.current_datamodule, f'{self.dataset_type}_dataloader')()
        # Some dataloaders lack .dataset; patch it on from the datamodule.
        if (not hasattr(self.current_dataloader, 'dataset')):
            self.current_dataloader.dataset = getattr(self.current_datamodule, f'{self.dataset_type}_dataset')
        return self.current_dataloader
    def prepare_batch(self, batch):
        """Convert a raw batch to a sample list tagged with dataset name/type."""
        self._check_current_dataloader()
        if hasattr(self.current_dataset, 'prepare_batch'):
            batch = self.current_dataset.prepare_batch(batch)
        batch = convert_batch_to_sample_list(batch)
        batch.dataset_name = self.current_dataset.dataset_name
        batch.dataset_type = self.dataset_type
        return batch
    def __len__(self):
        self._check_current_dataloader()
        return len(self.current_dataloader)
    def _check_current_dataloader(self):
        assert (self.current_dataloader is not None), ('Please call `get_dataloader` before accessing any ' + "'current_dataloader' based function")
    def add_to_report(self, report, model, *args, **kwargs):
        """Gather candidate fields across workers, format and append results."""
        if ('execute_on_master_only' in kwargs):
            warnings.warn("'execute_on_master_only keyword is deprecated and isn't used anymore", DeprecationWarning)
        self._check_current_dataloader()
        for key in self.candidate_fields:
            report = self.reshape_and_gather(report, key)
        results = []
        if hasattr(self.current_dataset, 'format_for_prediction'):
            results = self.current_dataset.format_for_prediction(report)
        # Model-level formatting runs after dataset-level formatting; fall
        # back to model.module for wrapped (e.g. DistributedDataParallel) models.
        if hasattr(model, 'format_for_prediction'):
            results = model.format_for_prediction(results, report)
        elif hasattr(model.module, 'format_for_prediction'):
            results = model.module.format_for_prediction(results, report)
        self.report = (self.report + results)
    def reshape_and_gather(self, report, key):
        """All-gather report[key] across workers, flattening the batch dim."""
        if (key in report):
            num_dims = report[key].dim()
            if (num_dims == 1):
                report[key] = gather_tensor(report[key]).view((- 1))
            elif (num_dims >= 2):
                # Keep trailing dims; fold the gathered worker dim into batch.
                other_dims = report[key].size()[1:]
                report[key] = gather_tensor(report[key]).view((- 1), *other_dims)
        return report
class TestProcessingUnit(FixtureTest):
    """Tests for DummyProcessingUnit construction."""

    def test_from_path_with_seed(self):
        """Two units built with the same seed must share a random stream."""
        upper_bound = 1000000.0
        seed = 1
        draws = []
        # Build two independent units and take one draw from each.
        for _ in range(2):
            unit = DummyProcessingUnit.from_path(None, random_state=seed)
            draws.append(unit.random_state.randint(upper_bound))
        self.assertEqual(draws[0], draws[1])
def polynomial_mmd_averages(codes_g, codes_r, n_subsets=50, subset_size=50, ret_var=True, output=sys.stdout, **kernel_args):
    """Estimate KID by averaging polynomial MMD^2 over random subsets.

    NOTE: the ``n_subsets`` and ``subset_size`` parameters are immediately
    overridden below (subset_size becomes the smaller sample count, and
    n_subsets the larger count divided by it); they are kept only for
    interface compatibility with callers.

    Args:
        codes_g: (n_g, d) array of generated-sample features.
        codes_r: (n_r, d) array of real-sample features.
        ret_var: if True, also return per-subset variance estimates.
        output: stream for the tqdm progress bar.
        **kernel_args: forwarded to polynomial_mmd.

    Returns:
        mmds array, or (mmds, variances) when ret_var is True.
    """
    m = min(codes_g.shape[0], codes_r.shape[0])
    subset_size = m
    n_subsets = (max(codes_g.shape[0], codes_r.shape[0]) // subset_size)
    mmds = np.zeros(n_subsets)
    if ret_var:
        # Renamed from `vars`, which shadowed the builtin.
        mmd_vars = np.zeros(n_subsets)
    choice = np.random.choice
    with tqdm(range(n_subsets), desc='MMD', file=output) as bar:
        for i in bar:
            g = codes_g[choice(len(codes_g), subset_size, replace=False)]
            mask = choice(len(codes_r), subset_size, replace=False)
            r = codes_r[mask]
            o = polynomial_mmd(g, r, **kernel_args, var_at_m=m, ret_var=ret_var)
            if ret_var:
                (mmds[i], mmd_vars[i]) = o
            else:
                mmds[i] = o
            # Show the running mean of the MMD estimates so far.
            bar.set_postfix({'mean': mmds[:(i + 1)].mean()})
    return ((mmds, mmd_vars) if ret_var else mmds)
# NOTE(review): the line below reads like a mangled decorator — upstream
# taichi tests use `@test_utils.test(exclude=[ti.opengl, ti.gles])`.
_utils.test(exclude=[ti.opengl, ti.gles])
def test_loop_config_parallel_range_for():
    """Check that ti.loop_config(parallelize/block_dim) keeps a range-for correct."""
    n = (1024 * 1024)
    val = ti.field(ti.i32, shape=n)
    # NOTE(review): `fill` is presumably a `@ti.kernel`; the decorator is
    # missing in this copy — confirm upstream.
    def fill():
        ti.loop_config(parallelize=8, block_dim=8)
        for i in range(n):
            val[i] = i
    fill()
    val_np = val.to_numpy()
    # Every element must equal its own index after the parallel fill.
    for i in range(n):
        assert (val_np[i] == i)
class TrackingBox(EvalBox):
    """EvalBox specialization for tracking evaluation: adds a tracking id,
    a class name (restricted to TRACKING_NAMES) and a confidence score.

    NOTE(review): ``deserialize`` takes ``cls`` but carries no
    ``@classmethod`` decorator in this copy — confirm upstream. ``__eq__``
    is defined without ``__hash__``, which makes instances unhashable.
    """
    def __init__(self, sample_token: str='', translation: Tuple[(float, float, float)]=(0, 0, 0), size: Tuple[(float, float, float)]=(0, 0, 0), rotation: Tuple[(float, float, float, float)]=(0, 0, 0, 0), velocity: Tuple[(float, float)]=(0, 0), ego_translation: Tuple[(float, float, float)]=(0, 0, 0), num_pts: int=(- 1), tracking_id: str='', tracking_name: str='', tracking_score: float=(- 1.0)):
        super().__init__(sample_token, translation, size, rotation, velocity, ego_translation, num_pts)
        assert (tracking_name is not None), 'Error: tracking_name cannot be empty!'
        assert (tracking_name in TRACKING_NAMES), ('Error: Unknown tracking_name %s' % tracking_name)
        # Strict type() check: deliberately rejects ints and numpy floats.
        assert (type(tracking_score) == float), 'Error: tracking_score must be a float!'
        assert (not np.any(np.isnan(tracking_score))), 'Error: tracking_score may not be NaN!'
        self.tracking_id = tracking_id
        self.tracking_name = tracking_name
        self.tracking_score = tracking_score
    def __eq__(self, other):
        """Field-wise equality over all base and tracking attributes."""
        return ((self.sample_token == other.sample_token) and (self.translation == other.translation) and (self.size == other.size) and (self.rotation == other.rotation) and (self.velocity == other.velocity) and (self.ego_translation == other.ego_translation) and (self.num_pts == other.num_pts) and (self.tracking_id == other.tracking_id) and (self.tracking_name == other.tracking_name) and (self.tracking_score == other.tracking_score))
    def serialize(self) -> dict:
        """Serialize all fields into a plain dict."""
        return {'sample_token': self.sample_token, 'translation': self.translation, 'size': self.size, 'rotation': self.rotation, 'velocity': self.velocity, 'ego_translation': self.ego_translation, 'num_pts': self.num_pts, 'tracking_id': self.tracking_id, 'tracking_name': self.tracking_name, 'tracking_score': self.tracking_score}
    def deserialize(cls, content: dict):
        """Rebuild a box from a dict; optional fields fall back to defaults."""
        return cls(sample_token=content['sample_token'], translation=tuple(content['translation']), size=tuple(content['size']), rotation=tuple(content['rotation']), velocity=tuple(content['velocity']), ego_translation=((0.0, 0.0, 0.0) if ('ego_translation' not in content) else tuple(content['ego_translation'])), num_pts=((- 1) if ('num_pts' not in content) else int(content['num_pts'])), tracking_id=content['tracking_id'], tracking_name=content['tracking_name'], tracking_score=((- 1.0) if ('tracking_score' not in content) else float(content['tracking_score'])))
def prompt_for_aspect_inferring(context, target):
    """Build an aspect-inference prompt for a sentence and target entity.

    Returns:
        (new_context, prompt): new_context restates the sentence; prompt
        appends the aspect question about the target.
    """
    new_context = f'Given the sentence "{context}", '
    question = f'which specific aspect of {target} is possibly mentioned?'
    return (new_context, new_context + question)
def CreateConv2dFixedChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, conv_kinds=[ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], epilogue_functor=EpilogueFunctor.LinearCombination, swizzling_functor=SwizzlingFunctor.Identity4):
    """Register fixed-channel Conv2d Fprop operations with the manifest.

    One operation is created per (tile description, channel count) pair
    using the FixedChannels iterator algorithm. Only ConvKind.Fprop is
    instantiated even if other kinds are listed in conv_kinds.

    Returns:
        The list of Conv2dOperation objects appended to the manifest.
        (The original built this list but never returned it; sibling
        CreateConv2d* generators return their operations list.)
    """
    (element_a, element_b, element_c, element_epilogue) = data_type
    iterator_algorithms = [IteratorAlgorithm.FixedChannels]
    # With no kernel filter, emit only a single representative kernel.
    if (manifest.kernel_filter == ''):
        tile_descriptions = [tile_descriptions[0]]
        channel_counts = [channel_counts[0]]
    operations = []
    for tile in tile_descriptions:
        for channel_count in channel_counts:
            alignment_c = EpilogueAlignment(channel_count, tile)
            # A/B alignment is fixed to the channel count itself.
            A = TensorDescription(element_a, layout[0], channel_count)
            B = TensorDescription(element_b, layout[1], channel_count)
            C = TensorDescription(element_c, layout[2], alignment_c)
            swizzling_functor_ = swizzling_functor
            if (ConvKind.Fprop in conv_kinds):
                for iterator_algorithm in iterator_algorithms:
                    new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile, A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
                    manifest.append(new_operation)
                    operations.append(new_operation)
    return operations
class AttentionEnhancementModule(nn.Module):
    """Conv-BN-ReLU followed by channel attention and a BatchNorm.

    Note: the output is the batch-normalized attention map; no extra
    activation is applied after the final BatchNorm.
    """

    def __init__(self, in_chan, out_chan):
        super(AttentionEnhancementModule, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = Attention(out_chan)
        self.bn_atten = BatchNorm2d(out_chan)
        self.init_weight()

    def forward(self, x):
        # conv -> attention -> batch norm, in one expression.
        return self.bn_atten(self.conv_atten(self.conv(x)))

    def init_weight(self):
        """Kaiming-initialize every Conv2d child; zero its bias if present."""
        for child in self.children():
            if not isinstance(child, nn.Conv2d):
                continue
            nn.init.kaiming_normal_(child.weight, a=1)
            if child.bias is not None:
                nn.init.constant_(child.bias, 0)
class TripleConv(nn.Module):
    """Three stacked 3x3 Conv-BN-ReLU layers.

    With reverse=False the channel width changes in the first conv;
    with reverse=True it changes in the last conv instead.
    """

    def __init__(self, in_channels, out_channels, reverse=False):
        super().__init__()
        if reverse:
            widths = [(in_channels, in_channels),
                      (in_channels, in_channels),
                      (in_channels, out_channels)]
        else:
            widths = [(in_channels, out_channels),
                      (out_channels, out_channels),
                      (out_channels, out_channels)]
        self.triple_conv = nn.Sequential(*(Conv3x3BNReLU(cin, cout, stride=1) for cin, cout in widths))

    def forward(self, x):
        return self.triple_conv(x)
def test_trident_resnet_bottleneck():
    """TridentBottleneck unit tests: invalid configs must raise, valid ones
    must produce (num_branch, 64, 56, 56) outputs, and plugins must attach
    with the expected channel widths."""
    trident_dilations = (1, 2, 3)
    test_branch_idx = 1
    concat_output = True
    trident_build_config = (trident_dilations, test_branch_idx, concat_output)
    # Style must be 'pytorch' or 'caffe'.
    with pytest.raises(AssertionError):
        TridentBottleneck(*trident_build_config, inplanes=64, planes=64, style='tensorflow')
    # 'after_conv4' is not a valid plugin position for a bottleneck.
    with pytest.raises(AssertionError):
        plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv4')]
        TridentBottleneck(*trident_build_config, inplanes=64, planes=16, plugins=plugins)
    # Duplicate plugins at one position need distinct postfixes.
    with pytest.raises(AssertionError):
        plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
        TridentBottleneck(*trident_build_config, inplanes=64, planes=16, plugins=plugins)
    # Unregistered plugin types raise KeyError.
    with pytest.raises(KeyError):
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        TridentBottleneck(*trident_build_config, inplanes=64, planes=16, plugins=plugins)
    # Gradient-checkpointing (with_cp) path still produces the right shape.
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([block.num_branch, 64, 56, 56]))
    # 'pytorch' style strides in conv2; 'caffe' style strides in conv1.
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=64, stride=2, style='pytorch')
    assert (block.conv1.stride == (1, 1))
    assert (block.conv2.stride == (2, 2))
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')
    assert (block.conv1.stride == (2, 2))
    assert (block.conv2.stride == (1, 1))
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([block.num_branch, 64, 56, 56]))
    # A ContextBlock after conv3 sees the expanded (64) channels.
    plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert (block.context_block.in_channels == 64)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([block.num_branch, 64, 56, 56]))
    # GeneralizedAttention after conv2 sees the bottleneck (16) channels.
    plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2')]
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert (block.gen_attention_block.in_channels == 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([block.num_branch, 64, 56, 56]))
    # Mixed plugin types across positions.
    plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2'), dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert (block.gen_attention_block.in_channels == 16)
    assert (block.nonlocal_block.in_channels == 16)
    assert (block.context_block.in_channels == 64)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([block.num_branch, 64, 56, 56]))
    # Multiple same-type plugins distinguished by postfix.
    plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=1), position='after_conv2'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=2), position='after_conv3'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=3), position='after_conv3')]
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert (block.context_block1.in_channels == 16)
    assert (block.context_block2.in_channels == 64)
    assert (block.context_block3.in_channels == 64)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([block.num_branch, 64, 56, 56]))
class UnpairedImageVal(UnpairedImageBase):
    """Validation split of the unpaired-image dataset, capped at 1000 samples."""

    def __init__(self, size=None, random_crop=False, folder1=None, folder2=None, numpy_folder1=None, numpy_folder2=None, wikiart_info1=None, wikiart_key1=None, wikiart_info2=None, wikiart_key2=None):
        super().__init__()
        path_kwargs = dict(size=size, random_crop=random_crop, folder1=folder1, folder2=folder2, numpy_folder1=numpy_folder1, numpy_folder2=numpy_folder2, wikiart_info1=wikiart_info1, wikiart_key1=wikiart_key1, wikiart_info2=wikiart_info2, wikiart_key2=wikiart_key2)
        self.data = UnpairedImagePaths(**path_kwargs)
        # Validation never needs more than 1000 examples.
        self.data._length = min(self.data._length, 1000)
def basic_model():
    """Build a tiny Conv2D + BatchNorm + ReLU Keras model for tests.

    Returns:
        (model, number of flattened kernel weights of the conv layer,
         output size computed from the input layer's shape).
    """
    uniform_init = initializers.random_uniform(0, 1)
    model_input = Input(shape=(8, 8, 3))
    conv_out = Conv2D(2, 3, padding='same', name='conv2d')(model_input)
    bn_out = BatchNormalization(gamma_initializer='random_normal', beta_initializer='random_normal', moving_mean_initializer='random_normal', moving_variance_initializer=uniform_init, name='bn1')(conv_out)
    model_output = ReLU()(bn_out)
    model = keras.Model(inputs=model_input, outputs=model_output)
    # layers[1] is the conv layer (layers[0] is the InputLayer).
    kernel_weight_count = getattr(model.layers[1], KERNEL).numpy().flatten().shape[0]
    return (model, kernel_weight_count, compute_output_size(model.layers[0].output_shape))
class MaskTokensDataset(BaseWrapperDataset):
    """BERT-style masking wrapper: masks ``mask_prob`` of each item's
    tokens, optionally leaving a fraction unmasked or replacing with random
    vocab tokens; with ``return_masked_tokens=True`` it instead returns the
    pad-filled target sequence (non-pad only at masked positions).

    NOTE(review): ``apply_mask`` takes ``cls`` (presumably ``@classmethod``),
    ``can_reuse_epoch_itr_across_epochs`` reads like an ``@property``, and
    the bare ``_cache(maxsize=8)`` line before ``__getitem__`` is presumably
    a mangled ``@lru_cache(maxsize=8)`` — decorators are missing in this copy.
    """
    def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
        """Return (masked-source dataset, masked-target dataset) sharing one
        LRU-cached base so the expensive masking is computed once per index."""
        dataset = LRUCacheDataset(dataset)
        return (LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)), LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)))
    def __init__(self, dataset: torch.utils.data.Dataset, vocab: Dictionary, pad_idx: int, mask_idx: int, return_masked_tokens: bool=False, seed: int=1, mask_prob: float=0.15, leave_unmasked_prob: float=0.1, random_token_prob: float=0.1, freq_weighted_replacement: bool=False, mask_whole_words: torch.Tensor=None):
        assert (0.0 < mask_prob < 1.0)
        assert (0.0 <= random_token_prob <= 1.0)
        assert (0.0 <= leave_unmasked_prob <= 1.0)
        assert ((random_token_prob + leave_unmasked_prob) <= 1.0)
        self.dataset = dataset
        self.vocab = vocab
        self.pad_idx = pad_idx
        self.mask_idx = mask_idx
        self.return_masked_tokens = return_masked_tokens
        self.seed = seed
        self.mask_prob = mask_prob
        self.leave_unmasked_prob = leave_unmasked_prob
        self.random_token_prob = random_token_prob
        self.mask_whole_words = mask_whole_words
        if (random_token_prob > 0.0):
            # Sampling distribution for replacement tokens: frequency-weighted
            # or uniform, with special symbols excluded (weight 0).
            if freq_weighted_replacement:
                weights = np.array(self.vocab.count)
            else:
                weights = np.ones(len(self.vocab))
            weights[:self.vocab.nspecial] = 0
            self.weights = (weights / weights.sum())
        self.epoch = 0
    def can_reuse_epoch_itr_across_epochs(self):
        # Masking is re-seeded per (seed, epoch, index), so iterators are reusable.
        return True
    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        self.epoch = epoch
    # NOTE(review): presumably a mangled `@lru_cache(maxsize=8)` decorator.
    _cache(maxsize=8)
    def __getitem__(self, index: int):
        # Deterministic masking for each (seed, epoch, index) triple.
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            item = self.dataset[index]
            sz = len(item)
            assert (self.mask_idx not in item), 'Dataset contains mask_idx (={}), this is not expected!'.format(self.mask_idx)
            if (self.mask_whole_words is not None):
                # Whole-word mode: decide masks per word, then expand back
                # to token positions via the word lengths.
                word_begins_mask = self.mask_whole_words.gather(0, item)
                word_begins_idx = word_begins_mask.nonzero().view((- 1))
                sz = len(word_begins_idx)
                words = np.split(word_begins_mask, word_begins_idx)[1:]
                assert (len(words) == sz)
                word_lens = list(map(len, words))
            mask = np.full(sz, False)
            # Adding rand() probabilistically rounds mask_prob * sz.
            num_mask = int(((self.mask_prob * sz) + np.random.rand()))
            mask[np.random.choice(sz, num_mask, replace=False)] = True
            if self.return_masked_tokens:
                # Target sequence: pad everywhere except masked positions.
                if (self.mask_whole_words is not None):
                    mask = np.repeat(mask, word_lens)
                new_item = np.full(len(mask), self.pad_idx)
                new_item[mask] = item[(torch.from_numpy(mask.astype(np.uint8)) == 1)]
                return torch.from_numpy(new_item)
            rand_or_unmask_prob = (self.random_token_prob + self.leave_unmasked_prob)
            if (rand_or_unmask_prob > 0.0):
                # Split masked positions into leave-unmasked vs random-token.
                rand_or_unmask = (mask & (np.random.rand(sz) < rand_or_unmask_prob))
                if (self.random_token_prob == 0.0):
                    unmask = rand_or_unmask
                    rand_mask = None
                elif (self.leave_unmasked_prob == 0.0):
                    unmask = None
                    rand_mask = rand_or_unmask
                else:
                    unmask_prob = (self.leave_unmasked_prob / rand_or_unmask_prob)
                    decision = (np.random.rand(sz) < unmask_prob)
                    unmask = (rand_or_unmask & decision)
                    rand_mask = (rand_or_unmask & (~ decision))
            else:
                unmask = rand_mask = None
            if (unmask is not None):
                # Remove leave-unmasked positions from the mask-token set.
                mask = (mask ^ unmask)
            if (self.mask_whole_words is not None):
                mask = np.repeat(mask, word_lens)
            new_item = np.copy(item)
            new_item[mask] = self.mask_idx
            if (rand_mask is not None):
                num_rand = rand_mask.sum()
                if (num_rand > 0):
                    if (self.mask_whole_words is not None):
                        rand_mask = np.repeat(rand_mask, word_lens)
                        num_rand = rand_mask.sum()
                    new_item[rand_mask] = np.random.choice(len(self.vocab), num_rand, p=self.weights)
            return torch.from_numpy(new_item)
class Encoder(chainer.Chain):
    """Stack of Conv_BN links applied after swapping the (time, channel) axes.

    Args:
        nb_inputs: number of input channels.
        channel_list: output channel count of each conv layer.
        ksize_list: kernel size per layer.
        pad_list: padding per layer; defaults to zeros for every layer.
            (Was a mutable default argument ``[]``; replaced with the
            None-sentinel idiom — passing ``[]`` still behaves the same.)
    """
    def __init__(self, nb_inputs, channel_list, ksize_list, pad_list=None):
        super(Encoder, self).__init__()
        self.nb_layers = len(channel_list)
        channel_list = ([nb_inputs] + channel_list)
        # Default: no padding for any layer.
        if not pad_list:
            pad_list = [0 for _ in range(len(ksize_list))]
        for (idx, (nb_in, nb_out, ksize, pad)) in enumerate(zip(channel_list[:(- 1)], channel_list[1:], ksize_list, pad_list)):
            self.add_link('conv{}'.format(idx), Conv_BN(nb_in, nb_out, ksize, pad))
    def __call__(self, x):
        """Swap axes 1 and 2 (channels-first over time), then run the convs."""
        h = F.swapaxes(x, 1, 2)
        for idx in range(self.nb_layers):
            h = getattr(self, 'conv{}'.format(idx))(h)
        return h
class AppDirs(object):
    """Convenience wrapper exposing platform-specific application directories.

    NOTE(review): in upstream ``appdirs`` each of these accessors is an
    ``@property``; the decorators are missing in this copy — confirm
    before changing call sites.
    """
    def __init__(self, appname=None, appauthor=None, version=None, roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        # roaming: Windows roaming vs local appdata profile.
        self.roaming = roaming
        # multipath: return all matching site dirs instead of just the first.
        self.multipath = multipath
    def user_data_dir(self):
        """Per-user data directory for this application."""
        return user_data_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming)
    def site_data_dir(self):
        """Shared (site-wide) data directory."""
        return site_data_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath)
    def user_config_dir(self):
        """Per-user config directory."""
        return user_config_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming)
    def site_config_dir(self):
        """Shared (site-wide) config directory."""
        return site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath)
    def user_cache_dir(self):
        """Per-user cache directory."""
        return user_cache_dir(self.appname, self.appauthor, version=self.version)
    def user_state_dir(self):
        """Per-user state directory."""
        return user_state_dir(self.appname, self.appauthor, version=self.version)
    def user_log_dir(self):
        """Per-user log directory."""
        return user_log_dir(self.appname, self.appauthor, version=self.version)
class CNN_Text(nn.Module):
    """TextCNN (Kim, 2014): parallel conv filters over word embeddings,
    max-pooled over time, concatenated and classified by a linear layer."""

    def __init__(self, args):
        super(CNN_Text, self).__init__()
        self.args = args
        vocab_size = args.embed_num
        embed_dim = args.embed_dim
        num_classes = args.class_num
        in_channels = 1
        num_kernels = args.kernel_num
        kernel_sizes = args.kernel_sizes
        self.embed = nn.Embedding(vocab_size, embed_dim)
        # One Conv2d per kernel height; each spans the full embedding width.
        self.convs1 = nn.ModuleList([nn.Conv2d(in_channels, num_kernels, (k, embed_dim)) for k in kernel_sizes])
        self.dropout = nn.Dropout(args.dropout)
        self.fc1 = nn.Linear(len(kernel_sizes) * num_kernels, num_classes)

    def conv_and_pool(self, x, conv):
        """Apply one conv + ReLU, then max-pool over the time dimension."""
        activated = F.relu(conv(x)).squeeze(3)
        return F.max_pool1d(activated, activated.size(2)).squeeze(2)

    def forward(self, x):
        embedded = self.embed(x)
        if self.args.static:
            # Static embeddings: wrap so they are not fine-tuned.
            embedded = Variable(embedded)
        embedded = embedded.unsqueeze(1)
        pooled = [self.conv_and_pool(embedded, conv) for conv in self.convs1]
        features = torch.cat(pooled, 1)
        return self.fc1(self.dropout(features))
# NOTE(review): mangled decorator — presumably `@pytest.mark.parametrize(...)`.
.parametrize('prior', (EP_PRIORS + [MAP_L21NormPrior(size=(2, 100), gamma=3, isotropic=False), MAP_L21NormPrior(size=(3, 100), gamma=5, isotropic=False)]))
def test_prior_grad_EP_diagonal(prior):
    """Diagonal (non-isotropic) EP prior gradients must match the analytic
    rx/vx values within EPSILON absolute tolerance."""
    assert (not prior.isotropic)
    df = check_prior_grad_EP_diagonal(prior)
    assert_allclose(df['rx'], df['grad_bx_A1'], rtol=0, atol=EPSILON)
    assert_allclose(df['vx'], df['grad_bx_A2'], rtol=0, atol=EPSILON)
class EMAConfig(FairseqDataclass):
    """Dataclass config for exponential-moving-average (EMA) shadow models."""
    # Fixed: metadata key previously used the builtin ``help`` as the dict key
    # instead of the string 'help', so argparse help text was silently lost.
    store_ema: bool = field(default=False, metadata={'help': 'store exponential moving average shadow model'})
    ema_decay: float = field(default=0.9999, metadata={'help': 'decay for exponential moving average model'})
    ema_start_update: int = field(default=0, metadata={'help': 'start EMA update after this many model updates'})
    ema_seed_model: Optional[str] = field(default=None, metadata={'help': 'Seed to load EMA model from. Used to load EMA model separately from the actual model.'})
    ema_update_freq: int = field(default=1, metadata={'help': 'Do EMA update every this many model updates'})
    ema_fp32: bool = field(default=False, metadata={'help': 'If true, store EMA model in fp32 even if model is in fp16'})
class TorchTrainingRun(TrainingRun):
    """TrainingRun specialization adding checkpointing, guarded gradient
    steps, and stats/metadata logging for PyTorch models."""

    def __init__(self, config, save_dir):
        super(TorchTrainingRun, self).__init__(config, save_dir)
        # All model snapshots live under <workspace>/checkpoints.
        self.workspace.add_dir('checkpoints', 'checkpoints')

    # Restored: this appeared as a bare `_property` line (mangled decorator),
    # which is a NameError at class-creation time.
    @property
    def checkpoints(self):
        """Checkpoints collection backed by the workspace checkpoints dir."""
        return Checkpoints(self.workspace.checkpoints)

    # Restored: the method takes `cls` but had no decorator.
    @classmethod
    def _finite_grads(cls, parameters):
        """Return False if any parameter gradient contains NaN/inf."""
        for param in parameters:
            if (param.grad is None):
                continue
            if (not np.isfinite(param.grad.data.sum())):
                return False
        return True

    # Restored: the method takes `cls` but had no decorator.
    @classmethod
    def _take_grad_step(cls, train_state, loss, max_grad_norm=float('inf')):
        """Backprop `loss`, clip gradients, and step the optimizer only when
        all gradients are finite.

        Returns:
            (finite_grads, grad_norm): whether the step was applied, and the
            pre-clip gradient norm.
        """
        (model, optimizer) = (train_state.model, train_state.optimizer)
        optimizer.zero_grad()
        loss.backward()
        grad_norm = clip_grad_norm(model.parameters(), max_grad_norm, norm_type=2)
        train_state.track_grad_norms(grad_norm)
        # Skip the optimizer update entirely on NaN/inf gradients.
        finite_grads = cls._finite_grads(model.parameters())
        if finite_grads:
            optimizer.step()
        train_state.increment_train_steps()
        return (finite_grads, grad_norm)

    def _log_stats(self, stats, step):
        """Log each (path, value) stat to TensorBoard and the run metadata."""
        # Fixed: `stats.iteritems()` is Python 2 only (the file uses
        # f-strings elsewhere, so it targets Python 3).
        for (path, val) in stats.items():
            name = '_'.join(path)
            self.tb_logger.log_value(name, val, step)
            with self.metadata.name_scope_path((['stats'] + list(path[:(- 1)]))):
                self.metadata[path[(- 1)]] = val

    def _update_metadata(self, train_state):
        """Record liveness and training progress in the run metadata."""
        self.metadata['last_seen'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.metadata['steps'] = train_state.train_steps
        self.metadata['max_grad_norm'] = train_state.max_grad_norm
class State():
    """Bundle of model/optimizer/scheduler/epoch with save/load helpers.

    NOTE(review): ``checkpoint`` and ``load`` take no ``self``/``cls`` —
    upstream they are presumably ``@staticmethod``s; the decorators are
    missing in this copy.
    """
    def __init__(self, model, optimizer=None, scheduler=None, epoch=None):
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.epoch = epoch
    def save(self, filepath):
        """Serialize state dicts plus the model's public attributes so that
        `load` can rebuild the model class by name."""
        model = self.model
        if (not isinstance(model, dict)):
            model = model.state_dict()
        model_class = self.model.__class__.__name__
        optimizer = self.optimizer
        if ((not isinstance(optimizer, dict)) and (optimizer is not None)):
            optimizer = optimizer.state_dict()
        scheduler = self.scheduler
        if ((not isinstance(scheduler, dict)) and (scheduler is not None)):
            scheduler = scheduler.state_dict()
        epoch = self.epoch
        # The class must be resolvable from the 'models' registry at load time.
        assert (utils.get_class('models', model_class) is not False)
        # Capture all public non-callable attributes as constructor arguments.
        arguments = dict(((key, getattr(self.model, key)) for key in dir(self.model) if ((not callable(getattr(self.model, key))) and (not key.startswith('_')) and (not (key == 'kwargs')))))
        kwargs = getattr(self.model, 'kwargs', None)
        utils.makedir(os.path.dirname(filepath))
        torch.save({'model': model, 'model_class': model_class, 'optimizer': optimizer, 'scheduler': scheduler, 'epoch': epoch, 'arguments': arguments, 'kwargs': kwargs}, filepath)
    def checkpoint(filepath, model, optimizer=None, scheduler=None, epoch=None):
        """Convenience wrapper: build a State and save it to filepath."""
        state = State(model, optimizer, scheduler, epoch)
        state.save(filepath)
    def load(filepath):
        """Rebuild the model from a checkpoint file and return a State.

        Loads onto CPU via map_location and frees the CUDA cache afterwards.

        NOTE(review): `save` always stores a 'kwargs' key (possibly None);
        unpacking `**checkpoint['kwargs']` raises TypeError when it is None —
        confirm whether older checkpoints hit this path.
        """
        assert os.path.exists(filepath), ('file %s not found' % filepath)
        checkpoint = torch.load(filepath, map_location=(lambda storage, loc: storage))
        model_class = utils.get_class('models', checkpoint['model_class'])
        if ('kwargs' in checkpoint):
            arguments = {**checkpoint['arguments'], **checkpoint['kwargs']}
        else:
            arguments = {**checkpoint['arguments']}
        model = model_class(**arguments)
        model.load_state_dict(checkpoint['model'])
        state = State(model, checkpoint['optimizer'], checkpoint['scheduler'], checkpoint['epoch'])
        del checkpoint
        torch.cuda.empty_cache()
        return state
class Transformer(nn.Module):
    """Siamese Transformer encoder that scores sentence-pair similarity as
    the cosine of mean-pooled encoder outputs; also owns its training loop.

    NOTE(review): the ``encoder_cross`` branch and ``positioncross`` are
    built but never used in ``forward``; the repeated ``j = 0`` assignments
    are dead code — presumably leftovers from an ablated variant.
    """
    def __init__(self, config, src_vocab, target_vocab, s_v, t_v, u):
        super(Transformer, self).__init__()
        self.config = config
        (h, N, dropout) = (self.config.h, self.config.N, self.config.dropout)
        (d_model, d_ff) = (self.config.d_model, self.config.d_ff)
        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)
        # Double-width modules for the (currently unused) cross encoder.
        attncross = MultiHeadedAttention(h, (d_model * 2))
        ffcross = PositionwiseFeedForward((d_model * 2), d_ff, dropout)
        positioncross = PositionalEncoding((d_model * 2), dropout)
        self.encoder = Encoder(EncoderLayer(config.d_model, deepcopy(attn), deepcopy(ff), dropout), N)
        self.encoder_cross = EncoderCross(EncoderLayerCross((config.d_model * 2), deepcopy(attncross), deepcopy(ffcross), dropout), N)
        self.src_embed = nn.Sequential(Embeddings(config.d_model, src_vocab, s_v, u), deepcopy(position))
        self.target_embed = nn.Sequential(Embeddings(config.d_model, target_vocab, t_v, u), deepcopy(position))
        self.fc = nn.Linear(self.config.d_model, self.config.output_size)
        self.sigmoid = nn.Sigmoid()
        self.cos = nn.CosineSimilarity(dim=1, eps=1e-06)
        self.softmax = nn.Softmax()
    def forward(self, x1, x2, idx, type='default'):
        """Score similarity of batched sequences x1/x2.

        `idx` indexes rows of precomputed contextual embeddings when
        config.EmbeddingType is 'Contextualized'.

        NOTE(review): `type` shadows the builtin and is never read; `config`
        below is the module-level config, not self.config — confirm intended.
        """
        idx = idx.data.cpu().numpy()
        if (config.EmbeddingType == 'Contextualized'):
            embedded_sents1 = return_emb1(int(idx[0]), idx.size)
            embedded_sents2 = return_emb2(int(idx[0]), idx.size)
            embedded_sents1 = torch.from_numpy(embedded_sents1).float().cuda()
            embedded_sents2 = torch.from_numpy(embedded_sents2).float().cuda()
        else:
            # Inputs arrive (seq, batch); embedding expects (batch, seq).
            embedded_sents1 = self.src_embed(x1.permute(1, 0))
            embedded_sents2 = self.target_embed(x2.permute(1, 0))
        # Shared encoder, self-attention only (query == key source).
        encoded_sents1 = self.encoder(embedded_sents1, embedded_sents1)
        encoded_sents2 = self.encoder(embedded_sents2, embedded_sents2)
        j = 0
        # Mean-pool over the sequence dimension.
        final_feature_map1 = torch.mean(encoded_sents1, 1)
        final_feature_map2 = torch.mean(encoded_sents2, 1)
        j = 0
        final_out1 = final_feature_map1
        final_out2 = final_feature_map2
        output = self.cos(final_out1, final_out2)
        j = 0
        return output
    def add_optimizer(self, optimizer):
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        self.loss_op = loss_op
    def reduce_lr(self):
        """Halve the learning rate of every optimizer param group."""
        print('Reducing LR')
        for g in self.optimizer.param_groups:
            g['lr'] = (g['lr'] / 2)
    def run_epoch(self, train_iterator, val_iterator, test_iterator, epoch):
        """Train one epoch (LR halved at 1/3 and 2/3 of max_epochs), then
        evaluate on the validation and test iterators.

        Returns:
            (train_losses, val_accuracy, v_c).

        NOTE(review): `y` is passed as the `type` argument of forward here —
        likely unintended; also x1/x2/y/idx are only defined when CUDA is
        available. Confirm against upstream.
        """
        train_losses = []
        val_accuracies = []
        losses = []
        self.train()
        if ((epoch == int((self.config.max_epochs / 3))) or (epoch == int(((2 * self.config.max_epochs) / 3)))):
            self.reduce_lr()
        for (i, batch) in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x1 = batch.text1.cuda()
                x2 = batch.text2.cuda()
                y = batch.label.type(torch.cuda.FloatTensor)
                idx = batch.index.type(torch.cuda.FloatTensor)
            y_pred = self.__call__(x1, x2, idx, y)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if ((i % 100) == 0):
                # Record the running average loss every 100 batches.
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                losses = []
        (trainfilename, testfilename, validfilename) = return_file_name()
        print('Evaluating Epoch')
        config = Config()
        (val_accuracy, v_c) = evaluate_model(self, val_iterator, filename=validfilename)
        (test_accuracy, t_c) = evaluate_model(self, test_iterator, filename=testfilename)
        print(('validation \t' + str((val_accuracy * config.multiplyby))))
        print(('test \t' + str((test_accuracy * config.multiplyby))))
        return (train_losses, val_accuracy, v_c)
class Gatv2MolConfig(MolConfig):
    """Model/pretrained-weights configuration for GATv2 on the HIV dataset."""

    def model(self, hparams):
        """Instantiate a residual GATv2 HIV network from tuned hparams."""
        net_kwargs = dict(hidden_dim=self.hidden, num_graph_layers=NUM_LAYERS, in_feat_drop=hparams['dropout'], residual=True, gat_version=2)
        return GatHIVNet(**net_kwargs)

    def pretrained(self, model_dir):
        """Load pretrained GATv2/HIV weights from model_dir."""
        load_kwargs = dict(dataset_name='hiv', model_name='gatv2', hidden=self.hidden, model_dir=model_dir, pretrained_conf=PRETRAINED_CONF)
        return load_pretrained(self, **load_kwargs)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save a training checkpoint under FLAGS.log_dir.

    Args:
        state: serializable checkpoint payload for torch.save.
        is_best: when True, also copy the checkpoint to model_best.pth.tar.
        filename: checkpoint file name inside the log directory.
    """
    directory = FLAGS.log_dir
    # exist_ok avoids the check-then-create race; os.path.join fixes the
    # former `directory + filename` concatenation, which silently produced
    # a wrong path whenever log_dir lacked a trailing separator.
    os.makedirs(directory, exist_ok=True)
    filepath = os.path.join(directory, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(directory, 'model_best.pth.tar'))
def _linear(args, output_size, bias, bias_initializer=None, kernel_initializer=None):
    """Linear map: concat(args) * W [+ b] (TF1-style RNN cell helper).

    Args:
        args: a 2D Tensor or list of 2D Tensors, each shaped [batch, n_i]
            with a statically known second dimension.
        output_size: int, second dimension of the weight matrix W.
        bias: whether to add a bias vector.
        bias_initializer: initializer for the bias; defaults to zeros.
        kernel_initializer: initializer for W.

    Returns:
        A 2D Tensor shaped [batch, output_size].

    Raises:
        ValueError: if args is None/empty, an input is not 2D, or a second
            dimension is not statically known.
    """
    if ((args is None) or (nest.is_sequence(args) and (not args))):
        raise ValueError('`args` must be specified')
    if (not nest.is_sequence(args)):
        args = [args]
    # Sum the static second dimensions so one weight matrix can multiply the
    # concatenation of all inputs.
    total_arg_size = 0
    shapes = [a.get_shape() for a in args]
    for shape in shapes:
        if (shape.ndims != 2):
            raise ValueError(('linear is expecting 2D arguments: %s' % shapes))
        if (shape[1].value is None):
            raise ValueError(('linear expects shape[1] to be provided for shape %s, but saw %s' % (shape, shape[1])))
        else:
            total_arg_size += shape[1].value
    # All inputs are assumed to share a dtype; take the first.
    dtype = [a.dtype for a in args][0]
    scope = vs.get_variable_scope()
    with vs.variable_scope(scope) as outer_scope:
        weights = vs.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype, initializer=kernel_initializer)
        if (len(args) == 1):
            res = math_ops.matmul(args[0], weights)
        else:
            res = math_ops.matmul(array_ops.concat(args, 1), weights)
        if (not bias):
            return res
        # Biases live in an unpartitioned sub-scope so they are never sharded.
        with vs.variable_scope(outer_scope) as inner_scope:
            inner_scope.set_partitioner(None)
            if (bias_initializer is None):
                bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
            biases = vs.get_variable(_BIAS_VARIABLE_NAME, [output_size], dtype=dtype, initializer=bias_initializer)
        return nn_ops.bias_add(res, biases)
class AlphaDropout(_DropoutNd):
    """Module wrapper around ``F.alpha_dropout``.

    Applies alpha dropout with probability ``self.p``; only active while
    ``self.training`` is True (``p`` presumably set by the ``_DropoutNd``
    base -- confirm against its definition).
    """
    def forward(self, input):
        return F.alpha_dropout(input, self.p, self.training)
def get_mock_args(finetune_from_model=None):
    """Return a MagicMock configured like a minimal fairseq-style args object.

    All checkpoint-restore flags default to "don't reset"; ``finetune_from_model``
    is passed through unchanged.
    """
    mock_args = MagicMock()
    mock_args.optimizer_overrides = '{}'
    for flag in ('reset_dataloader', 'reset_meters', 'reset_optimizer', 'reset_lr_scheduler'):
        setattr(mock_args, flag, False)
    mock_args.finetune_from_model = finetune_from_model
    mock_args.model_parallel_size = 1
    return mock_args
class _BaseWarmupScheduler(_LRScheduler):
    """LR scheduler that hands control to ``successor`` once warmup is over.

    For the first ``warmup_epoch`` epochs the (subclass-provided) get_lr()
    drives the learning rate; afterwards every step() is forwarded to the
    successor scheduler and its last LR is mirrored into ``self._last_lr``.
    """
    def __init__(self, optimizer, successor, warmup_epoch, last_epoch=(- 1), verbose=False):
        self.successor = successor
        self.warmup_epoch = warmup_epoch
        # Must come last: _LRScheduler.__init__ triggers an initial step().
        super().__init__(optimizer, last_epoch, verbose)
    def get_lr(self):
        # Warmup behavior is subclass-specific.
        raise NotImplementedError
    def step(self, epoch=None):
        if (self.last_epoch >= self.warmup_epoch):
            self.successor.step(epoch)
            self._last_lr = self.successor.get_last_lr()
        else:
            super().step(epoch)
class SyncAsyncTaskDecoFactory():
    """Decorator factory producing a sync or async wrapper depending on
    whether the decorated function is a coroutine.

    NOTE(review): the bare ``(func)`` lines below look like decorators whose
    '@' prefix (probably ``@functools.wraps(func)``) was lost in extraction,
    and ``wrapper`` reads like a ``@contextlib.contextmanager`` generator.
    Confirm against the original source before relying on this text.
    """
    def wrapper(self, func, *args, **kwargs):
        # Generator intended to bracket task execution; subclasses
        # presumably extend it with setup/teardown around the yield.
        (yield)
    def __call__(self, func):
        self.is_coroutine = asyncio.iscoroutinefunction(func)
        str_fmt = '{} Method ({}); Co-routine {}'
        (func)
        def sync_wrapper(*args, **kwargs):
            logger.debug(str_fmt.format('sync', func.__name__, self.is_coroutine))
            with self.wrapper(func, *args, **kwargs):
                return self.task.sync_execute()
        (func)
        async def async_wrapper(*args, **kwargs):
            logger.debug(str_fmt.format('async', func.__name__, self.is_coroutine))
            with self.wrapper(func, *args, **kwargs):
                return (await self.task.async_execute())
        # Return the wrapper matching the decorated function's flavor.
        if self.is_coroutine:
            return async_wrapper
        return sync_wrapper
class ActivationFinalBitwidthConfigVisualizer():
    """Plot the final activation bit-width chosen per layer, and per-layer
    activation tensor sizes with compression-rate guide lines."""

    def __init__(self, final_activation_nodes_config: List[Tuple[(BaseNode, int)]]):
        self.final_activation_nodes_config = final_activation_nodes_config
        # The second tuple element is the chosen bit-width for each node.
        self.node_final_bitwidth = [cfg[1] for cfg in self.final_activation_nodes_config]
        self.bar_width = 1
        # Activation-compression-rate guide lines: ACR value -> line color.
        self.vis_comp_rates = {4.0: 'tomato', 8.0: 'orange', 12.0: 'limegreen'}

    def plot_config_bitwidth(self) -> Figure:
        """Bar chart of the selected bit-width per layer index."""
        positions = list(range(1, len(self.node_final_bitwidth) + 1))
        (fig, ax) = plt.subplots()
        plt.bar(positions, self.node_final_bitwidth, width=self.bar_width, align='center')
        plt.grid()
        plt.xlabel('Layer Index', fontsize=12)
        plt.ylabel('Number of bits', fontsize=12)
        plt.tight_layout()
        return fig

    def plot_tensor_sizes(self, graph: Graph) -> Figure:
        """Bar chart of per-layer activation tensor sizes (MB, assuming 4-byte
        elements) with horizontal ACR guide lines."""
        tensors_sizes = [(4.0 * node.get_total_output_params()) / 1000000.0 for node in graph.get_sorted_activation_configurable_nodes()]
        biggest = max(tensors_sizes)
        # Each guide line sits at max_size / rate.
        guide_lines = [(rate, biggest / rate, color) for (rate, color) in self.vis_comp_rates.items()]
        positions = list(range(1, len(self.final_activation_nodes_config) + 1))
        (fig, ax) = plt.subplots()
        plt.bar(positions, tensors_sizes, width=self.bar_width, align='center')
        plt.grid()
        plt.xlabel('Layer Index', fontsize=12)
        plt.ylabel('Tensor Size [MB]', fontsize=12)
        for (rate, line_height, color) in guide_lines:
            plt.plot([positions[0], positions[-1]], [line_height, line_height], 'k--', color=color, label=f'ACR = {rate}')
        plt.legend()
        plt.tight_layout()
        return fig
class OthelloNNet():
    """Keras AlphaZero-style network for Othello.

    Four conv layers (two 'same', two 'valid' padding) feed two dense layers,
    ending in a softmax policy head ``pi`` over all actions and a tanh value
    head ``v`` in [-1, 1].
    """
    def __init__(self, game, args):
        (self.board_x, self.board_y) = game.getBoardSize()
        self.action_size = game.getActionSize()
        self.args = args
        # Board enters as (x, y); reshape adds the single channel dim.
        self.input_boards = Input(shape=(self.board_x, self.board_y))
        x_image = Reshape((self.board_x, self.board_y, 1))(self.input_boards)
        h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(args.num_channels, 3, padding='same', use_bias=False)(x_image)))
        h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(args.num_channels, 3, padding='same', use_bias=False)(h_conv1)))
        # 'valid' padding shrinks the spatial size by 2 per layer.
        h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(args.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))
        h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(args.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))
        h_conv4_flat = Flatten()(h_conv4)
        s_fc1 = Dropout(args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(1024, use_bias=False)(h_conv4_flat))))
        s_fc2 = Dropout(args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(512, use_bias=False)(s_fc1))))
        self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)
        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
        # Joint loss: cross-entropy on the policy head, MSE on the value head.
        self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(args.lr))
def build_trie():
    """Build a Trie over GPT-Neo tokenizations of tldr command names and
    pickle its dict to ./data/tldr/nl.cm/cmd_trie.pkl.

    Each command basename (minus '.txt') is tokenized with a leading space
    and wrapped in -1 / -2 sentinel ids before insertion.
    """
    from glob import glob
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B')
    ss = []
    for cmd in glob('./data/tldr/manual_trimmed/*.txt'):
        cmd = os.path.basename(cmd).replace('.txt', '')
        # Leading space so the tokenization matches mid-sentence usage.
        tok_cmd = tokenizer(f' {cmd}')['input_ids']
        ss.append((([(- 1)] + tok_cmd) + [(- 2)]))
    print(f'number of commands: {len(ss)}')
    trie = Trie(ss)
    with open('./data/tldr/nl.cm/cmd_trie.pkl', 'wb') as f:
        pickle.dump(trie.trie_dict, f)
def stem(string, language, resources):
    """Normalize, tokenize, and stem ``string``; return the space-joined stems."""
    from snips_nlu_utils import normalize
    tokens = tokenize_light(normalize(string), language)
    return ' '.join(_stem(token, resources) for token in tokens)
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register ns3::Ipv6AddressValue constructors/methods on its PyBindGen
    wrapper class (generated-binding style; keep in sync with the ns-3 API)."""
    # Default, value, and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return
class polylr(object):
    """Polynomial learning-rate decay: lr * (1 - t / nb) ** 0.9.

    ``nb`` is the total number of iterations and ``lr`` the base rate; each
    step() advances the iteration counter and writes the decayed rate into
    every optimizer parameter group.
    """

    def __init__(self, optimizer, nb, lr):
        self.nb = nb
        self.lr = lr
        self.optimizer = optimizer
        self.iteration = 0

    def step(self):
        """Advance one iteration and push the new LR into the optimizer."""
        self.iteration += 1
        self.update_lr(self.optimizer, self.calc_lr())

    def calc_lr(self):
        """Return the decayed rate for the current iteration."""
        remaining = 1 - float(self.iteration) / self.nb
        return self.lr * remaining ** 0.9

    def update_lr(self, optimizer, lr):
        """Write ``lr`` into each of the optimizer's parameter groups."""
        for group in optimizer.param_groups:
            group['lr'] = lr

    def state_dict(self):
        return {'iteration': self.iteration}

    def load_state_dict(self, state_dict):
        self.iteration = state_dict['iteration']
def show_versions():
    """Print system info, Python dependency versions, OpenMP build status,
    and (when available) threadpoolctl diagnostics."""
    print('\nSystem:')
    for (k, stat) in _get_sys_info().items():
        print('{k:>10}: {stat}'.format(k=k, stat=stat))
    print('\nPython dependencies:')
    for (k, stat) in _get_deps_info().items():
        print('{k:>13}: {stat}'.format(k=k, stat=stat))
    print('\n{k}: {stat}'.format(k='Built with OpenMP', stat=_openmp_parallelism_enabled()))
    threadpool_results = threadpool_info()
    if not threadpool_results:
        return
    print()
    print('threadpoolctl info:')
    last_index = len(threadpool_results) - 1
    for (i, result) in enumerate(threadpool_results):
        for (key, val) in result.items():
            print(f'{key:>15}: {val}')
        # Blank separator line between entries, but not after the last one.
        if (i != last_index):
            print()
def fourier_ellipsoid(input, size, n=(- 1), axis=(- 1), output=None):
    """Multiply a Fourier-domain array by the transfer function of an
    ellipsoid (scipy.ndimage-style helper backed by the _nd_image C module).

    Args:
        input: array of Fourier coefficients; at most 3-D.
        size: ellipsoid size per axis (a scalar is broadcast to all axes).
        n: length of the pre-transform axis for real FFT inputs; -1 means the
            input came from a complex FFT.
        axis: the real-transform axis.
        output: optional pre-allocated output array.

    Returns:
        The filtered array (``output`` when provided).

    Raises:
        NotImplementedError: for inputs with more than 3 dimensions.
    """
    input = numpy.asarray(input)
    if (input.ndim > 3):
        raise NotImplementedError('Only 1d, 2d and 3d inputs are supported')
    output = _get_output_fourier(output, input)
    if (output.size == 0):
        return output
    axis = normalize_axis_index(axis, input.ndim)
    sizes = _ni_support._normalize_sequence(size, input.ndim)
    sizes = numpy.asarray(sizes, dtype=numpy.float64)
    # The C extension requires a contiguous sizes buffer.
    if (not sizes.flags.contiguous):
        sizes = sizes.copy()
    # Filter id 2 selects the ellipsoid kernel in _nd_image.fourier_filter.
    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
    return output
# NOTE(review): the leading '.parametrize' line appears to be a decorator
# whose '@pytest.mark' prefix was lost in extraction -- restore before use.
.parametrize('max_kl_weight', [1.0, 2.0])
def test_compute_kl_weight_no_annealing(max_kl_weight):
    """Without annealing parameters, the KL weight equals max_kl_weight."""
    assert (_compute_kl_weight(1, 1, None, None, max_kl_weight, 0.0) == max_kl_weight)
class FWGDMAType(Enum):
    """DMA transfer type tags (LD = load, ST = store, MV = move; 'ITM'
    presumably abbreviates 'intermediate' -- confirm against the consumer)."""
    DEFAULT = (- 1)
    LD_INPUT_NEURON = 0
    ST_OUTPUT_NEURON = 1
    LD_ITM_NEURON = 2
    ST_ITM_NEURON = 3
    LD_COEFF = 4
    # NOTE(review): 'NERUON' looks like a typo for 'NEURON', but the member
    # name is public API -- renaming would break existing callers.
    LD_COEFF_NERUON = 5
    LD_COEFF_WINOGRAD = 6
    MV_ITM_NEURON = 7
    MV_OUTPUT_NEURON = 8
    MV_ITM_EXTEND_NEURON = 9
    ST_ITM_EXTEND_NEURON = 10
    LD_G2L2 = 11
    ST_OUTPUT_EXTEND_NEURON = 12
    LD_ITM_EXTEND_NEURON = 13
def create_inception_v4(nb_classes=int(args['num_classes']), load_weights=check):
    """Build an Inception-v4 Keras model for 299x299x3 inputs.

    Args:
        nb_classes: number of softmax output classes.
        load_weights: when truthy, load weights from the module-level
            ``checkpoint_path`` (by layer name). Defaults to the module-level
            ``check`` flag, preserving the previous behavior.

    Returns:
        The (uncompiled) keras Model named 'Inception-v4'.
    """
    init = Input((299, 299, 3))
    x = inception_stem(init)
    # Standard Inception-v4 layout: 4xA, reduction, 7xB, reduction, 3xC.
    for _ in range(4):
        x = inception_A(x)
    x = reduction_A(x)
    for _ in range(7):
        x = inception_B(x)
    x = reduction_B(x)
    for _ in range(3):
        x = inception_C(x)
    x = AveragePooling2D((8, 8))(x)
    x = Dropout(0.2)(x)
    x = Flatten()(x)
    out = Dense(output_dim=nb_classes, activation='softmax')(x)
    model = Model(init, out, name='Inception-v4')
    # Bug fix: honor the load_weights parameter. The old body re-read the
    # module-level `check` flag, which made the parameter dead.
    if load_weights:
        model.load_weights(checkpoint_path, by_name=True)
        print('Model weights loaded.')
    return model
def format_stack_entry(r):
    """Return a short, single-line display string for ``r``.

    A repr containing a newline is re-escaped with a second repr(); any repr
    of 16 characters or more collapses to '<TypeName 0xADDR>'.
    """
    text = repr(r)
    if '\n' in text:
        text = repr(text)
    if len(text) >= 16:
        return ('<%s 0x%x>' % (type(r).__name__, id(r)))
    return text
class UrsemWaves(Benchmark):
    """Ursem Waves 2-D optimization benchmark (known minimum -8.5536 at
    x = (1.2, 1.2))."""
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Asymmetric domain: x0 in [-0.9, 1.2], x1 in [-1.2, 1.2].
        self._bounds = [((- 0.9), 1.2), ((- 1.2), 1.2)]
        self.global_optimum = [[1.2 for _ in range(self.N)]]
        self.fglob = (- 8.5536)
    def fun(self, x, *args):
        # Count every objective evaluation.
        self.nfev += 1
        u = ((- 0.9) * (x[0] ** 2))
        # NOTE(review): (x1**2 - 4.5*x1**2) reduces to -3.5*x1**2; some
        # published definitions use x0**2 in the second term -- verify
        # against the intended reference before changing anything.
        v = ((((x[1] ** 2) - (4.5 * (x[1] ** 2))) * x[0]) * x[1])
        w = ((4.7 * cos(((3 * x[0]) - ((x[1] ** 2) * (2 + x[0]))))) * sin(((2.5 * pi) * x[0])))
        return ((u + v) + w)
# NOTE(review): the bare ('Direct') line appears to be a decorator whose
# registration prefix (e.g. '@GradientRegistrar...') was lost in extraction.
('Direct')
def AddDirectGradient(op, g_output):
    """Gradient for the 'Direct' op: emit a DirectGradient operator fed by all
    output gradients, copying the forward op's device option; the op's input
    gradients are its outputs (GIS)."""
    return (CopyDeviceOption(CreateOperator('DirectGradient', NeedAll(op, g_output), GIS(op)), op), GIS(op))
def save_npz(file, matrix, compressed=True):
arrays_dict = {}
if (matrix.format in ('csc', 'csr', 'bsr')):
arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
elif (matrix.format == 'dia'):
arrays_dict.update(offsets=matrix.offsets)
elif (matrix.format == 'coo'):
arrays_dict.update(row=matrix.row, col=matrix.col)
else:
msg = f'Save is not implemented for sparse matrix of format {matrix.format}.'
raise NotImplementedError(msg)
arrays_dict.update(format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data)
if isinstance(matrix, sp.sparse.sparray):
arrays_dict.update(_is_array=True)
if compressed:
np.savez_compressed(file, **arrays_dict)
else:
np.savez(file, **arrays_dict) |
def test(model, test_loader, num_nodes, target, device):
    """Evaluate ``model`` over ``test_loader``.

    Every batch's predictions are compared against the same ``target``
    tensor via NLL loss and argmax accuracy.

    Returns:
        (per-node accuracy, mean NLL loss per batch).
    """
    model.eval()
    hits = 0
    loss_sum = 0
    graphs_seen = 0
    with torch.no_grad():
        for batch in test_loader:
            logits = model(batch.to(device))
            loss_sum += F.nll_loss(logits, target).item()
            hits += logits.max(1)[1].eq(target).sum().item()
            graphs_seen += batch.num_graphs
    accuracy = hits / (graphs_seen * num_nodes)
    return (accuracy, loss_sum / len(test_loader))
def _fix_lane_names(label):
l_counter = 0
r_counter = 0
mapping = {}
lane_ids = [lane['lane_id'] for lane in label['lanes']]
for key in sorted(lane_ids):
if (key[0] == 'l'):
mapping[key] = ('l' + str(l_counter))
l_counter += 1
if (key[0] == 'r'):
mapping[key] = ('r' + str(r_counter))
r_counter += 1
for lane in label['lanes']:
lane['lane_id'] = mapping[lane['lane_id']] |
class Partition14(nn.Module):
    """Pipeline-parallel partition #14 of an auto-split T5ForConditionalGeneration.

    Holds decoder blocks 21-23, the final layer norm, decoder dropout, the LM
    head, and computes the cross-entropy loss. Generated-style code: modules
    arrive via ``layers`` keyed by LAYER_SCOPES; ``self.lookup`` maps local
    module names back to the original state-dict prefixes.
    """
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]']
    TENSORS = []
    def __init__(self, layers, tensors, device='cuda:14'):
        super().__init__()
        # Register this partition's layers as l_0 ... l_5, in scope order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Shared tensors (none for this partition) become parameters/buffers.
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Seven scalar (non-nested) inputs expected by forward's unflatten.
        self.input_structure = [1, 1, 1, 1, 1, 1, 1]
        self.lookup = {'l_0': 'decoder.block.21', 'l_1': 'decoder.block.22', 'l_2': 'decoder.block.23', 'l_3': 'decoder.final_layer_norm', 'l_4': 'decoder.dropout', 'l_5': 'lm_head'}
        self.to(self.device)
    def forward(self, *args):
        """Run the tail of the decoder and return the LM loss as a 1-tuple.

        Inputs unflatten to (labels, encoder_hidden, attn_mask, enc_attn_mask,
        hidden, position_bias, enc_dec_position_bias) -- names inferred from
        the keyword usage below; confirm against the partitioner.
        """
        (labels, x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
        # Each T5 block returns a tuple whose [0] is the hidden state and
        # whose [2]/[3] are carried position biases (presumably; confirm).
        t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_1 = t_0[slice(None, 2, None)]
        t_1 = t_1[0]
        t_2 = t_0[2]
        t_0 = t_0[3]
        t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_2 = t_0[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_0[2]
        t_0 = t_0[3]
        t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_1 = t_0[slice(None, 2, None)]
        t_1 = t_1[0]
        t_1 = self.l_3(t_1)
        t_2 = t_0[2]
        t_0 = t_0[3]
        t_1 = self.l_4(t_1)
        # 0.03125 rescaling before the LM head -- presumably d_model ** -0.5
        # for tied embeddings; TODO confirm against the exported model.
        t_1 = (t_1 * 0.03125)
        t_1 = self.l_5(t_1)
        t_3 = t_1.size((- 1))
        t_3 = t_1.view((- 1), t_3)
        t_1 = labels.view((- 1))
        t_1 = torch.nn.functional.cross_entropy(t_3, t_1, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='mean')
        return (t_1,)
    # The methods below delegate to module-level pipeline helpers so that
    # state-dict keys are remapped via self.lookup.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)
    def cpu(self):
        return cpu(self)
    def cuda(self, device=None):
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def do_int(value, default=0, base=10):
    """Best-effort integer conversion.

    Strings are parsed in ``base``; other values go through int() directly.
    Inputs int() rejects get a second chance via int(float(value)); anything
    still unconvertible yields ``default``.
    """
    try:
        return int(value, base) if isinstance(value, string_types) else int(value)
    except (TypeError, ValueError):
        pass
    try:
        return int(float(value))
    except (TypeError, ValueError):
        return default
class RandomStrongHopper(ModifiableRoboschoolHopper):
    """Hopper variant that re-samples its motor power on each fresh reset."""
    def randomize_power(self):
        # Uniform draw within the class-level RANDOM_LOWER/UPPER_POWER bounds.
        self.power = self.np_random.uniform(self.RANDOM_LOWER_POWER, self.RANDOM_UPPER_POWER)
    def _reset(self, new=True):
        # Only randomize on a genuinely new episode, not on soft resets.
        if new:
            self.randomize_power()
        return super(RandomStrongHopper, self)._reset(new)
    def parameters(self):
        # NOTE(review): `super().parameters` is accessed without calling it,
        # which only works if the base class exposes it as a property even
        # though it is a plain method here -- confirm against the base class.
        parameters = super(RandomStrongHopper, self).parameters
        parameters.update({'power': self.power})
        return parameters
def determine_redshift_from_filename(filename):
    """Parse the redshift embedded in a filename.

    Strips the directory and final extension, collects every maximal run of
    digits/dots, and parses the longest run (first on ties) as a float.

    Returns:
        The parsed float, or -1 when the name contains no digit/dot runs.

    Raises:
        ValueError: when the longest run is not a valid float (e.g. '1.2.3').
    """
    stem = os.path.splitext(os.path.basename(filename))[0]
    runs = []
    in_run = False
    for ch in stem:
        if ch.isdigit() or ch == '.':
            if not in_run:
                runs.append([])
            in_run = True
            runs[-1].append(ch)
        else:
            in_run = False
    if not runs:
        return -1
    # max() keeps the first maximal run, matching a strict '>' scan.
    return float(max((''.join(r) for r in runs), key=len))
class MORPH_TRANSFORMATIONS(Enum):
    """Names of the supported morphological transformations; the string
    values double as the identifiers used to select an operation."""
    EROSION = 'erosion'
    DILATION = 'dilation'
    OPENING = 'opening'
    CLOSING = 'closing'
    GRADIENT = 'gradient'
def load_test_data(train_path, filelist):
    """Load negation cue/scope examples from tab-separated CoNLL-style files.

    Sentences are blank-line separated; the first line of each file is
    skipped as a header. Columns: token text at index 3, POS at index 5, and
    from index 7 onward groups of 3 columns per annotated negation (cue at
    7+3i, scope at 8+3i) -- '_' meaning "not part of it". Sentences without
    negations yield a single all-zero example.

    Returns:
        A list of InputExample objects with space-joined text and per-token
        '0'/'1' cue and scope label lists.
    """
    sent_size = 0
    examples = []
    instance_size = 0
    for fil in filelist:
        line_co = 0
        readfile = codecs.open(((train_path + '/') + fil), 'r', 'utf-8')
        for line in readfile:
            if (line_co == 0):
                # First line of each file starts a fresh sentence buffer.
                line_group = []
            elif (len(line.strip()) > 0):
                line_group.append(line.strip())
            else:
                # Blank line terminates the current sentence.
                sent_size += 1
                assert (len(line_group) > 0)
                # Each negation adds 3 columns after the 7 fixed ones.
                negation_size = ((len(line_group[0].split('\t')) - 7) // 3)
                if (negation_size > 0):
                    # One example per annotated negation in the sentence.
                    for i in range(negation_size):
                        sent = []
                        # NOTE(review): pos is collected but never used.
                        pos = []
                        cue = []
                        scope = []
                        for subline in line_group:
                            parts = subline.strip().split('\t')
                            sent.append(parts[3])
                            pos.append(parts[5])
                            cue.append(('0' if (parts[(7 + (i * 3))] == '_') else '1'))
                            scope.append(('0' if (parts[(8 + (i * 3))] == '_') else '1'))
                        guid = ('train-' + str(instance_size))
                        examples.append(InputExample(guid=guid, text=' '.join(sent), cue_labels=cue, scope_labels=scope))
                        instance_size += 1
                else:
                    # No negation: emit one example with all-zero labels.
                    sent = []
                    pos = []
                    cue = []
                    scope = []
                    for subline in line_group:
                        parts = subline.strip().split('\t')
                        sent.append(parts[3])
                        pos.append(parts[5])
                        cue.append('0')
                        scope.append('0')
                    guid = ('train-' + str(instance_size))
                    examples.append(InputExample(guid=guid, text=' '.join(sent), cue_labels=cue, scope_labels=scope))
                    instance_size += 1
                # NOTE(review): bare string below is a no-op statement used
                # as a comment in the original source; kept as-is.
                'create empty for next sentence'
                line_group = []
            line_co += 1
        readfile.close()
    print('load over, test size:', len(examples), 'sent size:', (sent_size + 1))
    return examples
class Histogram(object):
    """Color -> label histogram baseline.

    Quantizes 3-component color inputs into buckets and stores per-bucket
    label counts for probability lookups. The per-dimension ranges are
    assumed to be (360, 100, 100) -- HSV-like; confirm against the callers.
    """
    def __init__(self, training_instances, names, granularity=(1, 1, 1), use_progress=False):
        # Ordered label vocabulary used when emitting probabilities.
        self.names = names
        # bucket tuple -> Counter(label -> count), and bucket -> total count.
        self.buckets = defaultdict(Counter)
        self.bucket_counts = defaultdict(int)
        self.granularity = granularity
        # Edge length of a bucket in each dimension.
        self.bucket_sizes = ((360 // granularity[0]), (100 // granularity[1]), (100 // granularity[2]))
        self.use_progress = use_progress
        self.add_data(training_instances)
    def add_data(self, training_instances):
        """Accumulate (inst.input color -> inst.output label) counts."""
        if self.use_progress:
            progress.start_task('Example', len(training_instances))
        for (i, inst) in enumerate(training_instances):
            if self.use_progress:
                progress.progress(i)
            bucket = self.get_bucket(inst.input)
            self.buckets[bucket][inst.output] += 1
            self.bucket_counts[bucket] += 1
        if self.use_progress:
            progress.end_task()
    def get_bucket(self, color):
        """Quantize ``color`` to its bucket's lower-corner tuple, clamping to
        the last bucket in each dimension."""
        return tuple(((s * min(int((d // s)), (g - 1))) for (d, s, g) in zip(color, self.bucket_sizes, self.granularity)))
    def get_probs(self, color):
        """Per-label probabilities for ``color``'s bucket; uniform over
        ``self.names`` when the bucket is empty."""
        bucket = self.get_bucket(color)
        counter = self.buckets[bucket]
        bucket_size = self.bucket_counts[bucket]
        probs = []
        for name in self.names:
            prob = (((counter[name] * 1.0) / bucket_size) if (bucket_size != 0) else (1.0 / len(self.names)))
            probs.append(prob)
        return probs
    def num_params(self):
        # One "parameter" per (bucket, observed label) pair.
        return sum((len(counter) for (_name, counter) in self.buckets.items()))
    def __getstate__(self):
        # Downgrade defaultdicts to plain dicts for portable pickling.
        state = dict(self.__dict__)
        for name in ('buckets', 'bucket_counts'):
            state[name] = dict(state[name])
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        # Restore the defaultdict behavior dropped in __getstate__.
        self.buckets = defaultdict(Counter, self.buckets)
        self.bucket_counts = defaultdict(int, self.bucket_counts)
class AttFusion(nn.Module):
    """Gated fusion of audio (x_a) and visual (x_v) feature streams.

    Each stream is scored by its own GRU-based scorer plus a sigmoid; the
    two scores are softmax-normalized against each other and used as convex
    weights for the element-wise fused output.
    """
    def __init__(self, input_dim=[512, 512], hidden_dim=128):
        super(AttFusion, self).__init__()
        # Project the visual stream only when its dim differs from audio's.
        self.use_proj = (input_dim[1] != input_dim[0])
        if self.use_proj:
            self.proj_v = nn.Linear(input_dim[1], input_dim[0])
        # NOTE(review): GRU is a project-local wrapper; the trailing
        # positional args' meaning is not visible here -- confirm signature.
        self.scorer_a = GRU(input_dim[0], hidden_dim, 1, 1, 1)
        self.scorer_v = GRU(input_dim[0], hidden_dim, 1, 1, 1)
    def forward(self, x_a, x_v):
        """Return the attention-weighted combination of x_v and x_a."""
        if self.use_proj:
            x_v = self.proj_v(x_v)
        h_v = torch.sigmoid(self.scorer_v(x_v))
        h_a = torch.sigmoid(self.scorer_a(x_a))
        # Normalize the two gate scores against each other along the last dim.
        h = torch.cat((h_v, h_a), dim=(- 1))
        h = F.softmax(h, dim=(- 1))
        f = ((h[(..., 0)].unsqueeze((- 1)) * x_v) + (h[(..., 1)].unsqueeze((- 1)) * x_a))
        return f
def test_anntorchdataset_from_manager(adata):
    """create_torch_dataset returns an AnnTorchDataset by default, and a
    torch Subset when explicit indices are supplied (``adata`` is a pytest
    fixture providing an AnnData object)."""
    adata_manager = generic_setup_adata_manager(adata)
    bd = adata_manager.create_torch_dataset()
    assert isinstance(bd, AnnTorchDataset)
    bd = adata_manager.create_torch_dataset(indices=np.arange(adata.n_obs))
    assert isinstance(bd, torch.utils.data.Subset)
def test_build_vanilla_deep_gp_returns_correct_defaults() -> None:
    """build_vanilla_deep_gp defaults: layer count, data-driven mean/kernel
    initialization, trainable Gaussian likelihood, and inducing-point count."""
    search_space = (Box([0.0], [1.0]) ** 4)
    x = search_space.sample(100)
    data = mk_dataset(x, quadratic(x))
    (empirical_mean, empirical_variance, _) = _get_data_stats(data)
    # Inducing points scale with the input dimension but are capped.
    num_inducing = min(MAX_NUM_INDUCING_POINTS, (NUM_INDUCING_POINTS_PER_DIM * search_space.dimension))
    vanilla_deep_gp = build_vanilla_deep_gp(data, search_space)
    assert isinstance(vanilla_deep_gp, DeepGP)
    assert (len(vanilla_deep_gp.f_layers) == NUM_LAYERS)
    # Final layer's mean/kernel are initialized from the data statistics.
    assert isinstance(vanilla_deep_gp.f_layers[(- 1)].mean_function, gpflow.mean_functions.Constant)
    npt.assert_allclose(vanilla_deep_gp.f_layers[(- 1)].mean_function.parameters[0], empirical_mean)
    assert isinstance(vanilla_deep_gp.f_layers[(- 1)].kernel.kernel, gpflow.kernels.RBF)
    npt.assert_allclose(vanilla_deep_gp.f_layers[(- 1)].kernel.kernel.variance, empirical_variance)
    assert isinstance(vanilla_deep_gp.likelihood_layer.likelihood, gpflow.likelihoods.Gaussian)
    npt.assert_allclose(tf.constant(vanilla_deep_gp.likelihood_layer.likelihood.variance), LIKELIHOOD_VARIANCE)
    assert isinstance(vanilla_deep_gp.likelihood_layer.likelihood.variance, gpflow.Parameter)
    assert vanilla_deep_gp.likelihood_layer.likelihood.variance.trainable
    for layer in vanilla_deep_gp.f_layers:
        assert (layer.inducing_variable.num_inducing == num_inducing)
class TestStreamingPickle(unittest.TestCase):
    """Round-trip check for the streaming pickle helpers s_dump / s_load."""

    def setUp(self):
        pass

    def testSimpleList(self):
        """Dump a small nested list to a temp file and stream it back element-wise."""
        expected = [1, [1, 2, 3, 4], [8, 9, 29]]
        with tempfile.TemporaryFile() as handle:
            s_dump(expected, handle)
            handle.seek(0)
            index = 0
            for (index, element) in enumerate(s_load(handle)):
                self.assertEqual(expected[index], element)
            # The final index proves every element was yielded.
            self.assertEqual(index, (len(expected) - 1))
def test_from_iter():
    """ak.drop_none regression cases: dropping None at each axis must match
    masking with ~ak.is_none at the same axis, across nesting depths,
    records, and option-typed sublists."""
    a = ak.Array([[1], [2, None]])
    assert (to_list(ak.drop_none(a)) == [[1], [2]])
    a = ak.Array([[2, None]])
    assert (to_list(ak.drop_none(a)) == [[2]])
    a = ak.Array([[[None]]])
    assert (to_list(ak.drop_none(a)) == [[[]]])
    a = ak.Array([1, 2, None])
    assert to_list(ak.drop_none(a, axis=0))
    # axis=2 drops None inside the innermost lists only.
    a = ak.Array([[[1, None]], [[3, 4]], [[5, 6]], [[7.8]]])
    assert (to_list(ak.drop_none(a, axis=2)) == to_list(a[(~ ak.is_none(a, axis=2))]) == [[[1.0]], [[3.0, 4.0]], [[5.0, 6.0]], [[7.8]]])
    # axis=1 drops None sublists while keeping inner Nones intact.
    a = ak.Array([[[0]], [[None]], [[1], None], [[2, None]]])
    assert (to_list(ak.drop_none(a, axis=1)) == to_list(a[(~ ak.is_none(a, axis=1))]) == [[[0]], [[None]], [[1]], [[2, None]]])
    a = ak.Array([[[0]], [None, 34], [[1], None, 31], [[2, [[None]]]], [[[None]]]])
    assert (to_list(ak.drop_none(a, axis=0)) == to_list(a[(~ ak.is_none(a, axis=0))]) == [[[0]], [None, 34], [[1], None, 31], [[2, [[None]]]], [[[None]]]])
    a = ak.Array([[[1, None]], [[3, None]], [[5, 6]], [[7.8]]])
    assert (to_list(ak.drop_none(a, axis=2)) == to_list(a[(~ ak.is_none(a, axis=2))]) == [[[1.0]], [[3.0]], [[5.0, 6.0]], [[7.8]]])
    a = ak.Array([[[1, None]], [[None, 4]], [[5, 6]], [[7.8]]])
    assert (to_list(ak.drop_none(a, axis=2)) == to_list(a[(~ ak.is_none(a, axis=2))]) == [[[1.0]], [[4.0]], [[5.0, 6.0]], [[7.8]]])
    a = ak.Array([[[1, None]], [[None, None]], [[5, 6]], [[7.8]]])
    assert (to_list(ak.drop_none(a, axis=2)) == to_list(a[(~ ak.is_none(a, axis=2))]) == [[[1.0]], [[]], [[5.0, 6.0]], [[7.8]]])
    a = ak.Array([[[1, None]], [[None, None]], [[None, 6]], [[7.8]]])
    assert (to_list(ak.drop_none(a, axis=2)) == to_list(a[(~ ak.is_none(a, axis=2))]) == [[[1.0]], [[]], [[6.0]], [[7.8]]])
    # Records: drop_none recurses into each field with no axis given.
    a = ak.Array([[{'x': [1], 'y': [[2]]}], [{'x': [None], 'y': [[None]]}], None])
    assert (to_list(a) == [[{'x': [1], 'y': [[2]]}], [{'x': [None], 'y': [[None]]}], None])
    assert (to_list(ak.drop_none(a)) == [[{'x': [1], 'y': [[2]]}], [{'x': [], 'y': [[]]}]])
    assert (to_list(ak.drop_none(a, axis=0)) == to_list(a[(~ ak.is_none(a, axis=0))]) == [[{'x': [1], 'y': [[2]]}], [{'x': [None], 'y': [[None]]}]])
    assert (to_list(ak.drop_none(a, axis=1)) == [[{'x': [1], 'y': [[2]]}], [{'x': [], 'y': [[None]]}], None])
def build_hidden_model(n_features, n_outputs, hidden_nodes, compile=False, optimizer='adam', lr=0.01, loss=crps_cost_function, activation='relu'):
    """Build (and optionally compile) a fully-connected Keras model.

    Args:
        n_features: input dimensionality.
        n_outputs: output dimensionality (linear output activation).
        hidden_nodes: int or list of ints, one entry per hidden layer.
        compile: when True, compile with ``optimizer``/``lr``/``loss``.
        optimizer: name looked up in ``keras.optimizers``.
        lr: learning rate passed to the optimizer.
        loss: loss function for compilation.
        activation: hidden-layer activation.

    Returns:
        The (optionally compiled) keras Model.
    """
    # Scalar shorthand: a single int means one hidden layer.
    if (type(hidden_nodes) is not list):
        hidden_nodes = [hidden_nodes]
    inp = Input(shape=(n_features,))
    x = Dense(hidden_nodes[0], activation=activation)(inp)
    if (len(hidden_nodes) > 1):
        for h in hidden_nodes[1:]:
            x = Dense(h, activation=activation)(x)
    x = Dense(n_outputs, activation='linear')(x)
    model = Model(inputs=inp, outputs=x)
    if compile:
        # NOTE(review): name lookup in keras.optimizers.__dict__ relies on
        # the lowercase aliases ('adam', 'sgd', ...) present in Keras 2.x
        # and on the legacy `lr=` kwarg -- confirm the pinned Keras version.
        opt = keras.optimizers.__dict__[optimizer](lr=lr)
        model.compile(optimizer=opt, loss=loss)
    return model
# NOTE(review): the bare call below looks like a decorator whose prefix
# (probably '@register_task') was lost in extraction -- restore before use.
_task('new_multilingual_masked_lm', dataclass=NewMultiLingualMaskedLMConfig)
class NewMultiLingualMaskedLMTask(LegacyFairseqTask):
    """Fairseq task for multilingual masked-LM training.

    Loads one token-block dataset per language directory, prepends a
    language token (style controlled by --encoder-langtok), applies token
    masking, and temperature-resamples the training split across languages.
    """
    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        # Extend the dictionary with one token per configured language.
        lang_list = args.langs.split(',')
        augment_dictionary(dictionary, lang_list, args.lang_tok_style)
        self.lang_tok_style = args.lang_tok_style
        self.encoder_langtok = args.encoder_langtok
        # Optionally reuse <bos> as the mask symbol instead of adding <mask>.
        if args.replace_mask_with_bos:
            self.mask_idx = dictionary.bos_index
        else:
            self.mask_idx = dictionary.add_symbol('<mask>')
    # NOTE(review): takes `cls` -- presumably a stripped @classmethod.
    def setup_task(cls, args, **kwargs):
        """Load the shared dictionary from the first data path and build the task."""
        paths = utils.split_paths(args.data)
        assert (len(paths) > 0)
        dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)
    def _get_whole_word_mask(self):
        """Build a per-token ByteTensor marking word starts (for whole-word
        masking), or None when --mask-whole-words is off."""
        if self.args.mask_whole_words:
            bpe = encoders.build_bpe(self.args)
            if (bpe is not None):
                def is_beginning_of_word(i):
                    # Specials and madeupword padding entries count as starts.
                    if (i < self.source_dictionary.nspecial):
                        return True
                    tok = self.source_dictionary[i]
                    if tok.startswith('madeupword'):
                        return True
                    try:
                        return bpe.is_beginning_of_word(tok)
                    except ValueError:
                        return True
                mask_whole_words = torch.ByteTensor(list(map(is_beginning_of_word, range(len(self.source_dictionary)))))
            else:
                mask_whole_words = None
        return mask_whole_words
    def _get_sample_prob(self, dataset_lens):
        """Temperature-smoothed sampling probabilities per language
        (alpha = --multilang-sampling-alpha)."""
        prob = (dataset_lens / dataset_lens.sum())
        smoothed_prob = (prob ** self.args.multilang_sampling_alpha)
        smoothed_prob = (smoothed_prob / smoothed_prob.sum())
        return smoothed_prob
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load ``split`` for every language subdirectory, mask tokens, and
        (for the train subset) resample languages by smoothed probability."""
        paths = utils.split_paths(self.args.data)
        assert (len(paths) > 0)
        # Round-robin over data paths by epoch.
        data_path = paths[((epoch - 1) % len(paths))]
        languages = sorted((name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name))))
        logger.info('Training on {0} languages: {1}'.format(len(languages), languages))
        # NOTE(review): the dict is passed as a lazy-format arg to a format
        # string with no placeholder, so it is never rendered -- likely bug.
        logger.info('Language to id mapping: ', {lang: id for (id, lang) in enumerate(languages)})
        mask_whole_words = self._get_whole_word_mask()
        lang_datasets = []
        for (lang_id, language) in enumerate(languages):
            split_path = os.path.join(data_path, language, split)
            dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
            if (dataset is None):
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
            # Reserve one position for the prepended language/bos token.
            dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode)
            logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
            lang_token = get_lang_tok(language, self.lang_tok_style)
            lang_id = self.dictionary.index(lang_token)
            # Choose the token prepended to every block.
            if (self.encoder_langtok is None):
                beginning_token = None
            elif (self.encoder_langtok == 'bos'):
                beginning_token = self.source_dictionary.bos()
            elif (self.encoder_langtok == 'src'):
                beginning_token = lang_id
            else:
                raise Exception('wrong indicator of beginning token!')
            dataset = PrependTokenDataset(dataset, beginning_token)
            (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words)
            lang_dataset = NestedDictionaryDataset({'net_input': {'src_tokens': PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True), 'lang_id': RawLabelDataset(([lang_id] * src_dataset.sizes.shape[0]))}, sizes=[src_dataset.sizes])
            lang_datasets.append(lang_dataset)
        dataset_lengths = np.array([len(d) for d in lang_datasets], dtype=float)
        logger.info('loaded total {} blocks for all languages'.format(dataset_lengths.sum()))
        if (split == self.args.train_subset):
            # Up/down-sample each language toward the smoothed distribution.
            sample_probs = self._get_sample_prob(dataset_lengths)
            logger.info('Sample probability by language: ', {lang: '{0:.4f}'.format(sample_probs[id]) for (id, lang) in enumerate(languages)})
            size_ratio = ((sample_probs * dataset_lengths.sum()) / dataset_lengths)
            logger.info('Up/Down Sampling ratio by language: ', {lang: '{0:.2f}'.format(size_ratio[id]) for (id, lang) in enumerate(languages)})
            resampled_lang_datasets = [ResamplingDataset(lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=(size_ratio[i] >= 1.0)) for (i, d) in enumerate(lang_datasets)]
            dataset = ConcatDataset(resampled_lang_datasets)
        else:
            # Validation: keep languages unsampled and also expose each one
            # as its own '<split>_<lang>' subset.
            dataset = ConcatDataset(lang_datasets)
            lang_splits = [split]
            for (lang_id, lang_dataset) in enumerate(lang_datasets):
                split_name = ((split + '_') + languages[lang_id])
                lang_splits.append(split_name)
                self.datasets[split_name] = lang_dataset
            if (split in self.args.valid_subset):
                self.args.valid_subset = self.args.valid_subset.replace(split, ','.join(lang_splits))
        # Shuffle deterministically per (seed, epoch), then sort by size.
        with data_utils.numpy_seed((self.args.seed + epoch)):
            shuffle = np.random.permutation(len(dataset))
        self.datasets[split] = SortDataset(dataset, sort_order=[shuffle, dataset.sizes])
    def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
        """Wrap raw token streams as an inference dataset (eos-delimited
        blocks, bos-prepended, optionally length-sorted)."""
        src_dataset = PadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad(), left_pad=False)
        src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
        src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths)
        if sort:
            src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
        return src_dataset
    # NOTE(review): the two accessors below follow fairseq's @property
    # convention -- the decorators were presumably stripped; confirm.
    def source_dictionary(self):
        return self.dictionary
    def target_dictionary(self):
        return self.dictionary
def setup_text_prompts(cfg, tokenizer):
    """Build entity-filled text prompts for video and image templates.

    Reads the first ``cfg.num_entities`` lines of ``cfg.entity_file_path``
    (entity name = first space-separated token per line), substitutes each
    entity into every video/image prompt template, and batch-tokenizes both
    prompt lists to a fixed length of 15.

    Returns:
        dict with raw prompt lists ('video_prompts', 'image_prompts') and
        their tokenized batches ('batch_enc_video_prompts',
        'batch_enc_image_prompts').
    """
    entity_filepath = cfg.entity_file_path
    entity_num = cfg.num_entities
    # Bug fix: the entity file handle was opened and never closed; a context
    # manager releases it deterministically.
    with open(entity_filepath) as entity_file:
        content = entity_file.read().split('\n')[:entity_num]
    entities = [c.split(' ')[0] for c in content]
    video_prompt_templates = get_video_prompt_templates()
    image_prompt_templates = get_image_prompt_templates()
    video_prompts = []
    for template in video_prompt_templates:
        video_prompts.extend([template.format(e) for e in entities])
    image_prompts = []
    for template in image_prompt_templates:
        image_prompts.extend([template.format(e) for e in entities])
    batch_enc_video_prompts = tokenizer.batch_encode_plus(video_prompts, max_length=15, padding='max_length', return_tensors='pt')
    batch_enc_image_prompts = tokenizer.batch_encode_plus(image_prompts, max_length=15, padding='max_length', return_tensors='pt')
    return dict(video_prompts=video_prompts, image_prompts=image_prompts, batch_enc_video_prompts=batch_enc_video_prompts, batch_enc_image_prompts=batch_enc_image_prompts)
def _coerce_to_rr(s: Union[(str, RepoRef)]) -> RepoRef:
    """Coerce ``s`` to a RepoRef: pass RepoRef instances through unchanged,
    parse strings via RepoRef.from_string."""
    return s if isinstance(s, RepoRef) else RepoRef.from_string(s)
def add_train_command(subparsers):
    """Register the 'train' sub-command and its CLI options on ``subparsers``.

    Plugin-specific options are added via callback.add_train_command_arg;
    execution dispatches to the module-level ``train_command`` function.
    """
    subparser = subparsers.add_parser('train', help='Training with NNP.')
    subparser.add_argument('-r', '--resume', help='Resume from last saved parameter', action='store_true')
    subparser.add_argument('-c', '--config', help='Path to nntxt', required=True)
    subparser.add_argument('-p', '--param', help='Path to parameter file', required=False)
    subparser.add_argument('-o', '--outdir', help='Output directory', required=True)
    # Out-of-core training knobs (sizes accept INTEGER / NUMeNUM / SI suffix).
    subparser.add_argument('-O', '--enable-ooc', help='Enable Out Of Core training', action='store_true')
    subparser.add_argument('-m', '--ooc-gpu-memory-size', help='OOC gpu memory size (INTEGER or NUMeNUM or NUM[KkMmGgTtPp])', default=None)
    subparser.add_argument('-C', '--context', help='Force exec context (cpu or cudnn[:DEVID])', default=None)
    subparser.add_argument('-w', '--ooc-window-length', help='OOC window length (INTEGER or NUMeNUM or NUM[KkMmGgTtPp])', default=None)
    callback.add_train_command_arg(subparser)
    subparser.set_defaults(func=train_command)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.