code stringlengths 101 5.91M |
|---|
def convert_metadata(old_metadata):
    """Convert legacy metadata to the current format.

    Delegates the actual column/key rewriting to
    ``_upgrade_columns_and_keys``.
    """
    return _upgrade_columns_and_keys(old_metadata)
def sub_time(time, factor, dt=1, freq=None):
    """Return a (time dimension, factor) pair for subsampled saving.

    A factor of ``None`` or ``1`` means no subsampling and the original
    ``time`` dimension is returned; otherwise ``time`` is wrapped in a
    ConditionalDimension that fires every ``factor`` steps.
    """
    if factor is None:
        return time, 1
    if factor == 1:
        return time, factor
    return ConditionalDimension(name='tsave', parent=time, factor=factor), factor
def test_instance_header():
    """InstanceHeader reports its info string and label lookups correctly."""
    labels = ['foo', 'bar', 'target']
    header = InstanceHeader(header=labels)
    assert header.get_info() == "InstanceHeader: header: ['foo', 'bar', 'target']"
    assert header.get_header_label_at(0) == 'foo'
    # Out-of-range index yields None rather than raising.
    assert header.get_header_label_at(4) is None
def run_k3mm(device_type: dace.dtypes.DeviceType):
    """Build, optimize and run the 3-matrix-multiply kernel on a device.

    Validates the result against the reference implementation and returns
    the compiled SDFG.
    """
    NI, NJ, NK, NL, NM = sizes['small']
    A, B, C, D = initialize(NI, NJ, NK, NL, NM)

    if device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}:
        # CPU/GPU path: rely on auto-optimization, pass sizes as symbols.
        sdfg = k3mm_kernel.to_sdfg()
        sdfg = auto_optimize(sdfg, device_type)
        E = sdfg(A, B, C, D, NI=NI, NJ=NJ, NK=NK, NL=NL, NM=NM)
    elif device_type == dace.dtypes.DeviceType.FPGA:
        # FPGA path: transform the SDFG, expand GEMMs with the systolic
        # implementation, then specialize the sizes before running.
        sdfg = k3mm_kernel.to_sdfg(simplify=True)
        applied = sdfg.apply_transformations([FPGATransformSDFG])
        assert applied == 1
        from dace.libraries.blas import Gemm
        Gemm.default_implementation = 'FPGA1DSystolic'
        sdfg.expand_library_nodes()
        sdfg.apply_transformations_repeated([InlineSDFG], print_report=True)
        sdfg.specialize(dict(NI=NI, NJ=NJ, NK=NK, NL=NL, NM=NM))
        E = sdfg(A, B, C, D)

    E_ref = k3mm_kernel.f(A, B, C, D)
    assert np.allclose(E, E_ref)
    return sdfg
def is_compiler(given, expected):
    """Return True if ``given`` names the compiler ``expected``.

    Matches either exactly, or as a path whose last component is
    ``expected`` preceded by the OS path separator.
    """
    if given == expected:
        return True
    if len(given) <= len(expected):
        return False
    cut = len(given) - len(expected)
    return given[cut - 1] == os.sep and given[cut:] == expected
def draw_at_coords(ax, coords, attn, img, title, radius=10, target_size=360):
    """Overlay attention circles on ``img`` at the given grid coordinates.

    ``coords`` are (x, y) positions on a 27x27 grid; they are rescaled to
    ``target_size`` pixels and flipped to (row, col) order. Each circle is
    tinted red in proportion to its (clamped) attention weight.
    """
    points = []
    for coord in coords:
        # Keep only (x, y), rescale from the 27-cell grid to pixels, then
        # reverse the pair to match the image axis ordering.
        scaled = [(coord[0] / 27) * target_size, (coord[1] / 27) * target_size]
        points.append(scaled[::-1])
    weights = attn[:len(points)]
    ax.imshow(resize(img, (target_size, target_size)))
    ax.axis('off')
    ax.set_title(title, fontsize=20)
    for point, weight in zip(points, weights):
        weight = min(max(weight, 0), 1)  # clamp to [0, 1]
        ax.add_patch(Circle(point, radius, color=(1, 1 - weight, 1 - weight)))
def get_t5_sequence_length_from_args(args):
    """Build the T5 sequence-length mapping from parsed CLI arguments."""
    return {
        'inputs': args.max_seq_length,
        'targets': args.answer_max_seq_length,
    }
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
    """Checks one line for spacing issues: blank-line placement, comment
    spacing, and spaces around operators, keywords, braces and punctuation.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      nesting_state: Tracks the current stack of nested blocks being parsed.
      error: The function to call with any errors found.
    """
    # Raw lines keep comment text; elided lines have comments/strings removed.
    raw = clean_lines.lines_without_raw_strings
    line = raw[linenum]
    # A blank line at the start of a code block is usually redundant, except
    # after multi-line function signatures or constructor initializer lists.
    if (IsBlankLine(line) and (not nesting_state.InNamespaceBody())):
        elided = clean_lines.elided
        prev_line = elided[(linenum - 1)]
        prevbrace = prev_line.rfind('{')
        if ((prevbrace != (- 1)) and (prev_line[prevbrace:].find('}') == (- 1))):
            exception = False
            if Match(' {6}\\w', prev_line):
                # Previous line is a 6-space-indented continuation: scan
                # upward for the line that starts the initializer list.
                search_position = (linenum - 2)
                while ((search_position >= 0) and Match(' {6}\\w', elided[search_position])):
                    search_position -= 1
                exception = ((search_position >= 0) and (elided[search_position][:5] == ' :'))
            else:
                exception = (Match(' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)', prev_line) or Match(' {4}:', prev_line))
            if (not exception):
                error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block should be deleted.')
        # A blank line just before a closing brace is redundant too
        # ("} else" lines are exempt).
        if ((linenum + 1) < clean_lines.NumLines()):
            next_line = raw[(linenum + 1)]
            if (next_line and Match('\\s*}', next_line) and (next_line.find('} else ') == (- 1))):
                error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block should be deleted.')
        matched = Match('\\s*(public|protected|private):', prev_line)
        if matched:
            error(filename, linenum, 'whitespace/blank_line', 3, ('Do not leave a blank line after "%s:"' % matched.group(1)))
    # Comment spacing: require two spaces between code and a trailing //
    # comment, and a space after the // (banners/doxygen markers exempt).
    commentpos = line.find('//')
    if (commentpos != (- 1)):
        # An even number of quotes before the // means it is not in a string.
        if (((line.count('"', 0, commentpos) - line.count('\\"', 0, commentpos)) % 2) == 0):
            if ((not Match('^\\s*{ //', line)) and (((commentpos >= 1) and (line[(commentpos - 1)] not in string.whitespace)) or ((commentpos >= 2) and (line[(commentpos - 2)] not in string.whitespace)))):
                error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments')
            commentend = (commentpos + 2)
            if ((commentend < len(line)) and (not (line[commentend] == ' '))):
                # Allowed no-space forms: ----/==== banners, a lone /, and
                # doxygen-style markers (//!<, ///<, /// ...).
                match = (Search('[=/-]{4,}\\s*$', line[commentend:]) or Search('^/$', line[commentend:]) or Search('^!< ', line[commentend:]) or Search('^/< ', line[commentend:]) or Search('^/+ ', line[commentend:]))
                if (not match):
                    error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment')
            CheckComment(line[commentpos:], filename, linenum, error)
    # Operator checks run on the elided line (strings/comments removed).
    line = clean_lines.elided[linenum]
    # Collapse operator overload names so their <, ==, etc. are not flagged.
    line = re.sub('operator(==|!=|<|<<|<=|>=|>>|>)\\(', 'operator\\(', line)
    if (Search('[\\w.]=[\\w.]', line) and (not Search('\\b(if|while) ', line))):
        error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =')
    match = Search('[^<>=!\\s](==|!=|<=|>=)[^<>=!\\s]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3, ('Missing spaces around %s' % match.group(1)))
    # << needs spaces unless it is a numeric shift (e.g. 1<<20).
    match = Search('(operator|\\S)(?:L|UL|ULL|l|ul|ull)?<<(\\S)', line)
    if (match and (not (match.group(1).isdigit() and match.group(2).isdigit())) and (not ((match.group(1) == 'operator') and (match.group(2) == ';')))):
        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<')
    elif (not Match('#.*include', line)):
        # < and > are flagged only when they cannot be matched as template
        # angle brackets on nearby lines ('->' stripped to avoid confusion).
        reduced_line = line.replace('->', '')
        match = Search('[^\\s<]<([^\\s=<].*)', reduced_line)
        if (match and (not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1)))):
            error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <')
        match = Search('^(.*[^\\s>])>[^\\s=>]', reduced_line)
        if (match and (not FindPreviousMatchingAngleBracket(clean_lines, linenum, match.group(1)))):
            error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >')
    match = Search('>>[a-zA-Z_]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>')
    # Unary operators should hug their operand: no space after ! or ~, no
    # whitespace around --/++ adjacent to whitespace/semicolons.
    match = Search('(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 4, ('Extra space for operator %s' % match.group(1)))
    # Control-flow keywords need a space before the opening parenthesis.
    match = Search(' (if\\(|for\\(|while\\(|switch\\()', line)
    if match:
        error(filename, linenum, 'whitespace/parens', 5, ('Missing space before ( in %s' % match.group(1)))
    # Inside if/for/while/switch parens, leading and trailing spaces must
    # match and be at most one.
    match = Search('\\b(if|for|while|switch)\\s*\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$', line)
    if match:
        if (len(match.group(2)) != len(match.group(4))):
            if (not (((match.group(3) == ';') and (len(match.group(2)) == (1 + len(match.group(4))))) or ((not match.group(2)) and Search('\\bfor\\s*\\(.*; \\)', line)))):
                error(filename, linenum, 'whitespace/parens', 5, ('Mismatching spaces inside () in %s' % match.group(1)))
        if (len(match.group(2)) not in [0, 1]):
            error(filename, linenum, 'whitespace/parens', 5, ('Should have zero or one spaces inside ( and ) in %s' % match.group(1)))
    # Commas/semicolons need a following space; commas are also checked on
    # the raw line so string contents cannot produce false positives.
    if (Search(',[^,\\s]', line) and Search(',[^,\\s]', raw[linenum])):
        error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,')
    if Search(';[^\\s};\\\\)/]', line):
        error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;')
    CheckSpacingForFunctionCall(filename, line, linenum, error)
    # An opening brace should be preceded by a space, unless it starts a
    # scoped/brace initializer (trailing text begins with one of {.;,)<]).
    match = Match('^(.*[^ ({]){', line)
    if match:
        (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1)))
        trailing_text = ''
        if (endpos > (- 1)):
            trailing_text = endline[endpos:]
        # NOTE(review): xrange is Python 2 -- this block appears to target
        # the original cpplint runtime; confirm before running on Python 3.
        for offset in xrange((endlinenum + 1), min((endlinenum + 3), (clean_lines.NumLines() - 1))):
            trailing_text += clean_lines.elided[offset]
        if (not Match('^[\\s}]*[{.;,)<\\]]', trailing_text)):
            error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {')
    if Search('}else', line):
        error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else')
    if (Search('\\w\\s+\\[', line) and (not Search('delete\\s+\\[', line))):
        error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [')
    # Empty statements should use {} rather than a bare semicolon.
    if Search(':\\s*;\\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.')
    elif Search('^\\s*;\\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, use {} instead.')
    elif (Search('\\s+;\\s*$', line) and (not Search('\\bfor\\b', line))):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty statement, use {} instead.')
    # Range-based for colons need surrounding spaces.
    if (Search('for *\\(.*[^:]:[^: ]', line) or Search('for *\\(.*[^: ]:[^:]', line)):
        error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop')
def set_lr_injection(lr_injection_value):
    """Feed the scalar LR-injection value into the workspace blob."""
    value = np.array([float(lr_injection_value)], dtype=np.float32)
    workspace.FeedBlob(_LEARNING_RATE_INJECTION, value)
.unit
.cartographer
def test_build_conditional_css():
    """build_conditional_css emits preload <link> tags for each stylesheet.

    NOTE(review): the first expected link has an apparently truncated href
    (`href=' as='`) -- confirm against the helper fixture data.
    """
    helpers.setup()
    actual_css = c.build_conditional_css(helpers.TEST_PATH)
    expected_css = '\n'.join([' <link rel=\'preload\' href=\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/MarkerCluster.Default.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/MarkerCluster.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/MarkerPopup.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/TileNearestNeighbor.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>'])
    helpers.tear_down()
    assert (expected_css == actual_css)
class PytorchTestLogger(unittest.TestCase):
    """Tests that MCT quantization writes TensorBoard logs via the Logger."""

    @classmethod
    def setUpClass(cls):
        # unittest invokes setUpClass on the class object, so it must be a
        # classmethod (the original bare ``def setUpClass(cls)`` raised a
        # TypeError when the suite ran). Run one quantization pass with
        # similarity analysis enabled to populate the log directory.
        Logger.set_log_file('/tmp/')
        model = mobilenet_v2(pretrained=True)
        core_config = mct.core.CoreConfig(debug_config=mct.core.DebugConfig(analyze_similarity=True))
        mct.ptq.pytorch_post_training_quantization_experimental(model, random_datagen, core_config=core_config)

    @classmethod
    def tearDownClass(cls) -> None:
        # Same classmethod requirement applies to tearDownClass.
        Logger.shutdown()

    def test_tensorboard_log_dir(self):
        """The tensorboard_logs directory is created under the log path."""
        self.assertTrue(os.path.exists(os.path.join(Logger.LOG_PATH, 'tensorboard_logs')))

    def test_tensorboard_initial_graph(self):
        """Exactly one event file is written for the initial graph."""
        events_dir = os.path.join(Logger.LOG_PATH, 'tensorboard_logs/')
        events_files = glob.glob((events_dir + 'initial_graph/*events*'))
        self.assertTrue((len(events_files) == 1))
def PositionalEmbeddingMul(num_embeddings: int, embedding_dim: int, padding_idx: int, learned: bool=False):
    """Build a positional embedding module (learned or sinusoidal).

    The table is enlarged by ``padding_idx + 1`` slots so position ids can
    be offset past the padding index; the learned variant also zeroes the
    padding row.
    NOTE(review): the sinusoidal branch assumes ``padding_idx`` is an int;
    passing None would fail in the size arithmetic -- confirm callers.
    """
    if learned:
        if (padding_idx is not None):
            num_embeddings = ((num_embeddings + padding_idx) + 1)
        m = LearnedPositionalEmbeddingMul(num_embeddings, embedding_dim, padding_idx)
        # fairseq-style init: normal weights scaled by 1/sqrt(dim), padding
        # row forced to zero.
        nn.init.normal_(m.weight, mean=0, std=(embedding_dim ** (- 0.5)))
        if (padding_idx is not None):
            nn.init.constant_(m.weight[padding_idx], 0)
    else:
        m = SinusoidalPositionalEmbeddingMul(embedding_dim, padding_idx, init_size=((num_embeddings + padding_idx) + 1))
    return m
class MAR(BaseMetric):
    """Mean Average Recall (MAR) metric.

    Averages, per user, the recall at every rank position up to the
    configured cutoff; aggregation across users happens in the framework.
    """

    def __init__(self, recommendations, config, params, eval_objects):
        """Store cutoff and binary relevance from the evaluation objects."""
        super().__init__(recommendations, config, params, eval_objects)
        self._cutoff = self._evaluation_objects.cutoff
        self._relevance = self._evaluation_objects.relevance.binary_relevance

    @staticmethod
    def name():
        # @staticmethod added: the original bare ``def name():`` raised a
        # TypeError when invoked on an instance.
        return 'MAR'

    @staticmethod
    def __user_ar(user_recommendations, cutoff, user_relevant_items):
        """Average the user's recall over rank positions 1..cutoff."""
        return np.average([MAR.__user_recall(user_recommendations[:cutoff], (n + 1), user_relevant_items) for n in range(cutoff)])

    @staticmethod
    def __user_recall(user_recommendations, cutoff, user_relevant_items):
        """Fraction of the user's relevant items found in the top ``cutoff``."""
        return (sum([1 for i in user_recommendations[:cutoff] if (i[0] in user_relevant_items)]) / len(user_relevant_items))

    def eval_user_metric(self):
        """Per-user MAR for users that have at least one relevant item."""
        return {u: MAR.__user_ar(u_r, self._cutoff, self._relevance.get_user_rel(u)) for (u, u_r) in self._recommendations.items() if len(self._relevance.get_user_rel(u))}
class ResidualAttention(nn.Module):
    """Residual-attention classification head.

    A 1x1 convolution produces per-class score maps; the final score blends
    the spatially averaged and max-pooled responses: ``avg + la * max``.
    """

    def __init__(self, channel=512, num_class=1000, la=0.2):
        super().__init__()
        self.la = la
        # Per-class score maps via a bias-free pointwise convolution.
        self.fc = nn.Conv2d(in_channels=channel, out_channels=num_class, kernel_size=1, stride=1, bias=False)

    def forward(self, x):
        b, c, h, w = x.shape  # unpacking doubles as a 4-D input check
        # Flatten the spatial dims: (B, num_class, H*W).
        score_maps = self.fc(x).flatten(2)
        avg_pool = score_maps.mean(dim=2)
        max_pool = score_maps.max(dim=2)[0]
        return avg_pool + self.la * max_pool
def NonDecreasingParkingFunctions(n=None):
    """Return all non-decreasing parking functions, or those of length ``n``.

    With ``n is None`` the combined class over all lengths is returned.
    """
    if n is None:
        return NonDecreasingParkingFunctions_all()
    return NonDecreasingParkingFunctions_n(n)
class WideResnet(nn.Module):
    """Wide-ResNet classifier with an optional projection (MLP) head.

    ``forward`` returns logits only, or ``(logits, l2-normalized embedding)``
    when the projection head is enabled.
    """

    def __init__(self, n_classes, k=1, n=28, low_dim=64, proj=True):
        super(WideResnet, self).__init__()
        self.n_layers, self.k = n, k
        self.backbone = WideResnetBackbone(k=k, n=n)
        self.classifier = nn.Linear(64 * self.k, n_classes, bias=True)
        self.proj = proj
        if proj:
            # Two-layer MLP projection head followed by L2 normalization.
            self.l2norm = Normalize(2)
            self.fc1 = nn.Linear(64 * self.k, 64 * self.k)
            self.relu_mlp = nn.LeakyReLU(inplace=True, negative_slope=0.1)
            self.fc2 = nn.Linear(64 * self.k, low_dim)

    def forward(self, x):
        # Global average pooling over the last backbone feature map.
        feat = torch.mean(self.backbone(x)[-1], dim=(2, 3))
        out = self.classifier(feat)
        if not self.proj:
            return out
        embed = self.fc2(self.relu_mlp(self.fc1(feat)))
        return out, self.l2norm(embed)

    def init_weight(self):
        """Xavier-init the classifier weights and zero its bias."""
        nn.init.xavier_normal_(self.classifier.weight)
        if self.classifier.bias is not None:
            nn.init.constant_(self.classifier.bias, 0)
.parametrize('func,arg,expected_lines', [('explicit_return_none', None, OrderedSet([8])), ('empty_function', None, OrderedSet([11])), ('pass_function', None, OrderedSet([16])), ('only_return_on_branch', True, OrderedSet([20, 21])), ('only_return_on_branch', False, OrderedSet([20])), ('return_on_both_branches', True, OrderedSet([25, 26])), ('return_on_both_branches', False, OrderedSet([25, 27])), ('pass_on_both', True, OrderedSet([31, 32])), ('pass_on_both', False, OrderedSet([31, 34])), ('for_return', [], OrderedSet([38])), ('for_return', [1], OrderedSet([38, 39]))])
def test_expected_covered_lines(func, arg, expected_lines, artificial_none_module):
    """Instrumented functions report exactly the expected covered lines.

    The parametrized ``func`` is fetched from the fixture module, its code
    object is swapped for an instrumented version, and the traced line ids
    are compared against ``expected_lines`` after one call with ``arg``.
    """
    tracer = ExecutionTracer()
    adapter = LineCoverageInstrumentation(tracer)
    transformer = InstrumentationTransformer(tracer, [adapter])
    func_object = getattr(artificial_none_module, func)
    # Replace the bytecode so execution is traced line-by-line.
    func_object.__code__ = transformer.instrument_module(func_object.__code__)
    tracer.current_thread_identifier = threading.current_thread().ident
    func_object(arg)
    assert (tracer.lineids_to_linenos(tracer.get_trace().covered_line_ids) == expected_lines)
class EnasCnnModelBuilder(DAGModelBuilder):
    """Model builder that compiles models from an ENAS Conv1D DAG.

    Wires a controller and session into the DAG and turns sampled
    architecture sequences into compiled models via ``__call__``.
    """

    def __init__(self, session=None, controller=None, dag_func='EnasConv1DDAG', l1_reg=0.0, l2_reg=0.0, batch_size=None, dag_kwargs=None, *args, **kwargs):
        # ``dag_func`` is passed by name; the parent resolves it to a class.
        super().__init__(*args, dag_func=dag_func, **kwargs)
        self.session = session
        self.controller = controller
        self.l1_reg = float(l1_reg)
        self.l2_reg = float(l2_reg)
        self.batch_size = (batch_size or 128)
        self.dag_kwargs = (dag_kwargs or {})
        self._build_dag()
        assert issubclass(type(self.dag), EnasConv1dDAG), 'EnasModelBuilder only support enasDAG and its derivatives'

    def _build_dag(self):
        # Instantiate the DAG, forwarding every builder setting.
        self.dag = self.dag_func(model_space=self.model_space, input_node=self.inputs_op, output_node=self.output_op, session=self.session, model_compile_dict=self.model_compile_dict, l1_reg=self.l1_reg, l2_reg=self.l2_reg, controller=self.controller, batch_size=self.batch_size, **self.dag_kwargs)

    def __call__(self, arc_seq=None, *args, **kwargs):
        """Build and compile a model for the given architecture sequence."""
        model = self.dag(arc_seq, **kwargs)
        model.compile(**self.model_compile_dict)
        return model

    def set_controller(self, controller):
        """Forward a (new) controller to the underlying DAG."""
        self.dag.set_controller(controller)
def save_plots_for_dataset_model(path_save: Path, optimizer_list=None, epochs=None):
    """Generate and save accuracy and run-time plots for one results folder.

    Plots are written under ``path_save / 'plots'``. Failures are logged
    instead of raised so a batch of plot jobs can keep going.
    """
    try:
        results = get_result_list(results_path=path_save, optimizer_list=optimizer_list)
        graph_title = f'{path_save.stem.upper()}'
        plots_path = path_save / 'plots'
        plots_path.mkdir(exist_ok=True)
        save_path_acc = plots_path / Path(path_save.stem + '_accuracy.png')
        save_path_time = plots_path / Path(path_save.stem + '_run_times.png')
        show_loss_acc_graph(opt_out_list=results, graph_title=graph_title, save_path=save_path_acc, epochs=epochs)
        show_time_graph(opt_out_list=results, graph_title=graph_title, save_path=save_path_time, epochs=epochs)
    except Exception as err:
        # Fixed message: previously read "Can make plot(s)".
        logger.error(f'''Cannot make plot(s)
{err}''')
class RandomSideObstacleBreakoutWorld(BreakoutWorld):
    """Breakout world that spawns a randomly sized obstacle on one side."""

    # Range (in world units) from which the obstacle width is sampled.
    side_obstacle_width_range_start = 0
    side_obstacle_width_range_end = 20

    def reset_world(self):
        super(RandomSideObstacleBreakoutWorld, self).reset_world()
        self.reset_obstacle()

    def reset_obstacle(self):
        """Replace any existing obstacle with a fresh one on a random side."""
        if hasattr(self, 'obstacle'):
            self.obstacle.kill()
        side = self.np_random.choice(['left', 'right'])
        width = int(self.np_random.uniform(self.side_obstacle_width_range_start, self.side_obstacle_width_range_end))
        # side is always 'left' or 'right', so x is always assigned below.
        if (side == 'left'):
            x = (width / 2)
        elif (side == 'right'):
            x = (self._width - (width / 2))
        self.obstacle = SideObstacle(world=self, position=(x, (self._height / 2)), width=width)
        self._batch.add(self.obstacle, z=1)
        self._obstacle_side = side
        self._obstacle_width = width

    def parameters(self):
        # NOTE(review): ``super().parameters`` is accessed without calling,
        # so the base class presumably exposes it as a property; overriding
        # with a plain method changes the access style -- confirm callers.
        parameters = super(RandomSideObstacleBreakoutWorld, self).parameters
        parameters.update({'obstacle_side': self._obstacle_side, 'obstacle_width': self._obstacle_width})
        return parameters
def parse_constants_2002to2014(d):
    """Parse a CODATA (2002-2014 format) fixed-width constants table.

    Each line holds: name (cols 0-54), value (55-76), uncertainty (77-98)
    and units (99+). '...' separators are stripped from values and
    '(exact)' uncertainties become 0.

    Returns a dict mapping constant name -> (value, units, uncertainty).
    """
    constants = {}
    for line in d.split('\n'):
        # Tolerate blank/trailing lines; the original crashed on float('').
        if not line.strip():
            continue
        name = line[:55].rstrip()
        val = float(line[55:77].replace(' ', '').replace('...', ''))
        uncert = float(line[77:99].replace(' ', '').replace('(exact)', '0'))
        units = line[99:].rstrip()
        constants[name] = (val, units, uncert)
    return constants
class InvertedResidual(nn.Module):
    """MobileNetV3-style inverted residual block.

    Structure: 1x1 expand -> depthwise conv -> optional squeeze-excite ->
    1x1 project, with a residual connection when the shape is preserved.
    """

    def __init__(self, cnf: InvertedResidualConfig, bn_norm, se_layer: Callable[(..., nn.Module)]=SqueezeExcitation):
        super().__init__()
        if (not (1 <= cnf.stride <= 2)):
            raise ValueError('illegal stride value')
        # Residual only when stride 1 and channel count is unchanged.
        self.use_res_connect = ((cnf.stride == 1) and (cnf.input_channels == cnf.out_channels))
        layers: List[nn.Module] = []
        activation_layer = (nn.Hardswish if cnf.use_hs else nn.ReLU)
        # 1x1 expansion (skipped when no expansion is configured).
        if (cnf.expanded_channels != cnf.input_channels):
            layers.append(ConvBNActivation(cnf.input_channels, cnf.expanded_channels, kernel_size=1, bn_norm=bn_norm, activation_layer=activation_layer))
        # Depthwise conv; dilation > 1 forces stride 1.
        stride = (1 if (cnf.dilation > 1) else cnf.stride)
        layers.append(ConvBNActivation(cnf.expanded_channels, cnf.expanded_channels, kernel_size=cnf.kernel, stride=stride, dilation=cnf.dilation, groups=cnf.expanded_channels, bn_norm=bn_norm, activation_layer=activation_layer))
        if cnf.use_se:
            layers.append(se_layer(cnf.expanded_channels))
        # 1x1 projection back to the output width (no activation).
        layers.append(ConvBNActivation(cnf.expanded_channels, cnf.out_channels, kernel_size=1, bn_norm=bn_norm, activation_layer=nn.Identity))
        self.block = nn.Sequential(*layers)
        self.out_channels = cnf.out_channels
        self._is_cn = (cnf.stride > 1)

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result += input
        return result
def test_verify_compatibility_type_errors():
    """verify_compatibility rejects mismatched kernel/inducing-variable kinds."""
    valid_inducing_variable = construct_basic_inducing_variables([35], input_dim=40)
    valid_kernel = construct_basic_kernel([Matern52()])
    valid_mean_function = Zero()
    # A bare (non-multioutput) kernel is incompatible.
    with pytest.raises(GPLayerIncompatibilityException):
        verify_compatibility(Matern52(), valid_mean_function, valid_inducing_variable)
    Z = valid_inducing_variable.inducing_variable_list[0].Z
    inducing_variable = InducingPoints(Z)
    # A bare (non-multioutput) inducing variable is incompatible too.
    with pytest.raises(GPLayerIncompatibilityException):
        verify_compatibility(valid_kernel, valid_mean_function, inducing_variable)
def recursive_obs_dict_to_spaces_dict(obs):
    """Convert a nested observation dict into a ``spaces.Dict``.

    Lists and numeric scalars become numpy arrays wrapped in symmetric Box
    spaces; nested dicts recurse; anything else raises TypeError.
    """
    assert isinstance(obs, dict)
    dict_of_spaces = {}
    for (k, v) in obs.items():
        _v = v
        if isinstance(v, list):
            _v = np.array(v)
        elif isinstance(v, (int, np.integer, float, np.floating)):
            # Scalars become length-1 arrays.
            _v = np.array([v])
        if isinstance(_v, np.ndarray):
            x = float(BIG_NUMBER)
            box = spaces.Box(low=(- x), high=x, shape=_v.shape, dtype=_v.dtype)
            # Halve the bound until the Box's dtype-cast low/high actually
            # straddle zero (guards against overflow when BIG_NUMBER does
            # not fit the array dtype).
            low_high_valid = ((box.low < 0).all() and (box.high > 0).all())
            while (not low_high_valid):
                x = (x // 2)
                box = spaces.Box(low=(- x), high=x, shape=_v.shape, dtype=_v.dtype)
                low_high_valid = ((box.low < 0).all() and (box.high > 0).all())
            dict_of_spaces[k] = box
        elif isinstance(_v, dict):
            dict_of_spaces[k] = recursive_obs_dict_to_spaces_dict(_v)
        else:
            raise TypeError
    return spaces.Dict(dict_of_spaces)
def main(output_dir):
    """Download the WebUI dataset parts and merge them into one zip.

    The merge is skipped when the combined archive already exists; the
    resulting path is printed either way.
    """
    os.makedirs(output_dir, exist_ok=True)
    dl_path = snapshot_download(repo_id='biglab/webui-all', repo_type='dataset')
    combined_zip_path = os.path.join(output_dir, 'webui-merged.zip')
    if os.path.exists(combined_zip_path):
        print(combined_zip_path)
        return
    part_paths = sorted(glob.glob(os.path.join(dl_path, '*.zip.*')))
    print('Merging...', len(part_paths), 'parts')
    # Concatenate the split archive parts in sorted order.
    with open(combined_zip_path, 'wb') as merged_fp:
        for fn in tqdm.tqdm(part_paths):
            with open(fn, 'rb') as part_fp:
                merged_fp.write(part_fp.read())
    print(combined_zip_path)
def run_episodes(session_list):
    """Run all sessions, threading them unless an OpenAI model is in use."""
    work_num = 4
    use_threads = len(session_list) > work_num and llm_name not in (OPENAI_CHAT_MODELS + OPENAI_LLM_MODELS)
    if use_threads:
        with ThreadPoolExecutor(max_workers=work_num) as executor:
            # Drain the map so every session actually executes.
            list(executor.map(run_one_session, session_list))
        print('Done the session running!')
    else:
        # Sequential fallback (small batches or rate-limited APIs).
        for sid in session_list:
            run_one_session(sid)
def obser_parser(observation, instruction_text):
    """Normalize a raw observation string.

    Decodes escaped unicode, rewrites button markers to brackets, strips the
    instruction prefix, and truncates click confirmations to one line.
    """
    text = observation.encode().decode('unicode-escape').encode('latin1').decode('utf-8')
    text = text.replace('[button]', '[').replace('[button_]', ']')
    if text.startswith('Instruction:'):
        text = text.replace(instruction_text, '').replace('Instruction:', '')
    if text.startswith('You have clicked'):
        # Keep only the confirmation line itself.
        text = text.split('\n')[0]
    return text.lstrip()
def get_mmcls_models():
    """Load the mmcls model-zoo URL mapping shipped with mmcv."""
    json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    return load_file(json_path)
class HBFile():
    """Facade over a Harwell-Boeing file: header metadata plus matrix I/O.

    NOTE(review): the accessors below (title/key/type/...) read like they
    were meant to be properties; as written they must be called as methods.
    """

    def __init__(self, file, hb_info=None):
        # ``file`` is an open file-like object; header info is parsed from
        # it unless an already-built HBInfo is supplied.
        self._fid = file
        if (hb_info is None):
            self._hb_info = HBInfo.from_file(file)
        else:
            self._hb_info = hb_info

    def title(self):
        return self._hb_info.title

    def key(self):
        return self._hb_info.key

    def type(self):
        return self._hb_info.mxtype.value_type

    def structure(self):
        return self._hb_info.mxtype.structure

    def storage(self):
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        """Read the matrix data from the underlying file."""
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        """Write matrix ``m`` to the underlying file."""
        return _write_data(m, self._fid, self._hb_info)
class RendererDecoder(nn.Module):
    """Decode an LSTM hidden map into an image via two upsampling steps."""

    def __init__(self, im_channels, h_dim, lstm_dim):
        super().__init__()
        # Spatial-size-preserving conv, then two stride-2 transposed convs
        # (4x total upsampling) ending at ``im_channels`` channels.
        self.decode = nn.Conv2d(lstm_dim, h_dim, 5, stride=1, padding=2)
        self.convt = nn.ConvTranspose2d(h_dim, h_dim * 2, 4, stride=2, padding=1)
        self.convt2 = nn.ConvTranspose2d(h_dim * 2, im_channels, 4, stride=2, padding=1)

    def sample(self, mu, logvar):
        # NOTE(review): ``self.reparam`` is never set in this class --
        # presumably attached externally; confirm before calling.
        return self.reparam.sample_gaussian(mu, logvar)

    def forward(self, h):
        out = F.relu(self.decode(h))
        out = F.relu(self.convt(out))
        return self.convt2(out)
def format_results(runs, title, split_name, step, best_other) -> str:
    """Render a LaTeX table fragment of accuracies grouped by init type.

    Rows follow ``init_order``, columns follow ``columns``; the best value
    in each column (by rounded mean) is bolded, and ``best_other`` fills a
    trailing multirow cell.
    """
    run_group = lib.common.group(runs, ['transformer.variant'])
    # Init types that actually occur among the grouped runs.
    variants = {v for (k, v) in init_type_table.items() if (f'transformer.variant_{k}' in run_group)}
    rtmp = []
    for i in init_order:
        if (i not in variants):
            continue
        cols = []
        # One cell per column; each column may match at most one variant.
        for clist in columns.values():
            cols.append(None)
            for c in clist:
                full_name = f'transformer.variant_{c}'
                if ((init_type_table[c] == i) and (full_name in run_group)):
                    assert (cols[(- 1)] is None), "Can't be multiple variants"
                    cols[(- 1)] = average_accuracy(run_group[full_name], split_name, step)
        rtmp.append((i, cols))
    # Per-column maxima (rounded to 2 decimals) decide which cells to bold.
    cols = [[r[1][i].mean for r in rtmp if r[1][i]] for i in range(len(columns))]
    maxy = [(round(max(c), 2) if c else None) for c in cols]
    rows = []
    for (i, cols) in rtmp:
        # NOTE(review): maxx is computed but never used.
        maxx = max((c.mean for c in cols if (c is not None)))
        cols = [(((('\\bf{' if (maxy[ci] and (round(c.mean, 2) == maxy[ci])) else '') + f'{c.mean:.2f} $\pm$ {c.std:.2f}') + ('}' if (maxy[ci] and (round(c.mean, 2) == maxy[ci])) else '')) if (c is not None) else '-') for (ci, c) in enumerate(cols)]
        rows.append((f'{i} & ' + ' & '.join(cols)))
    # First row carries the multirow title and best_other cells; subsequent
    # rows leave those columns empty.
    res = ((f'\multirow{{{len(rows)}}}{{*}}{{{title}}} & ' + rows[0]) + f''' & \multirow{{{len(rows)}}}{{*}}{{{best_other}}} \
''')
    for r in rows[1:]:
        res += f''' & {r} & \
'''
    return res
def _remove(file_set, module_base, to_remove):
path = os.path.join(*module_base.split('.'))
for filename in to_remove:
if filename.startswith('.'):
filename = (path + filename)
else:
filename = os.path.join(path, filename)
remove = [filename]
remove.append(importlib.util.cache_from_source(filename))
file_set.difference_update(remove) |
def prepare_train(save_json_train, save_json_valid, save_json_test=None, split_ratio=[80, 20], win_len=0.02, stride=0.02, seed=12, emovdb_folder=None, esd_folder=None, iemocap_folder=None, jlcorpus_folder=None, ravdess_folder=None):
random.seed(seed)
if (os.path.exists(save_json_train) and os.path.exists(save_json_valid)):
logger.info('train/valid json both exist, skipping preparation.')
return
all_dict = {}
check_and_prepare_dataset(emovdb_folder, 'EMOV-DB', prepare_emovdb, all_dict, seed)
check_and_prepare_dataset(esd_folder, 'ESD', prepare_esd, all_dict, seed)
check_and_prepare_dataset(iemocap_folder, 'IEMOCAP', prepare_iemocap, all_dict, seed)
check_and_prepare_dataset(jlcorpus_folder, 'JL_CORPUS', prepare_jlcorpus, all_dict, seed)
check_and_prepare_dataset(ravdess_folder, 'RAVDESS', prepare_ravdess, all_dict, seed)
bad_keys = []
for key in all_dict.keys():
try:
(intervals, ctc_label, frame_label) = get_labels(all_dict[key], win_len, stride)
all_dict[key]['frame_label'] = frame_label
all_dict[key]['ctc_label'] = ctc_label
except ValueError:
logger.info(f'Impossible to get labels for id {key} because the window is too large.')
bad_keys.append(key)
continue
for key in bad_keys:
del all_dict[key]
data_split = split_sets(all_dict, split_ratio)
train_ids = data_split['train']
train_split = {}
for id in train_ids:
train_split[id] = all_dict[id]
valid_ids = data_split['valid']
valid_split = {}
for id in valid_ids:
valid_split[id] = all_dict[id]
create_json(train_split, save_json_train)
create_json(valid_split, save_json_valid)
if (save_json_test is not None):
test_ids = data_split['test']
test_split = {}
for id in test_ids:
test_split[id] = all_dict[id]
create_json(test_split, save_json_test) |
class ModelErrors(object):
    """Container pairing a model name with its worst prediction cases."""

    def __init__(self, model_name):
        self.name = model_name
        # Populated later with the hardest examples at top-1 / top-10.
        self.top_1_error_cases = None
        self.top_10_error_cases = None
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac], debug=True)
def test_print_string():
    """Printing from Taichi accepts both a literal string with values and
    interleaved comma-separated arguments.

    NOTE(review): ``func`` carries no ``@ti.kernel`` decorator here, and the
    decorator above this test looks truncated in this file -- confirm
    against the original test module.
    """
    def func(x: ti.i32, y: ti.f32):
        print('hello, world! %s %d %f', 233, y)
        print('cool', x, 'well', y)
    func(666, 233.3)
    # Flush device-side prints before the test returns.
    ti.sync()
class RE24():
    """RE24 benchmark: two objectives, two variables, four soft constraints.

    Objective 1 is the design cost x1 + 120*x2; objective 2 is the total
    constraint violation (stress, shear, deflection and buckling limits).
    """

    def __init__(self):
        self.problem_name = 'RE24'
        self.n_objectives = 2
        self.n_variables = 2
        self.n_constraints = 0
        self.n_original_constraints = 4
        # Decision-variable bounds: x1 in [0.5, 4], x2 in [0.5, 50].
        self.lbound = np.array([0.5, 0.5])
        self.ubound = np.array([4.0, 50.0])

    def evaluate(self, x):
        """Return the two objective values for decision vector ``x``."""
        x1, x2 = x[0], x[1]
        # Material/limit parameters.
        E = 700000
        sigma_b_max = 700
        tau_max = 450
        delta_max = 1.5
        sigma_k = ((E * x1) * x1) / 100
        sigma_b = 4500 / (x1 * x2)
        tau = 1800 / x2
        delta = (56.2 * 10000) / (((E * x1) * x2) * x2)
        g = np.zeros(self.n_original_constraints)
        g[0] = 1 - (sigma_b / sigma_b_max)
        g[1] = 1 - (tau / tau_max)
        g[2] = 1 - (delta / delta_max)
        g[3] = 1 - (sigma_b / sigma_k)
        # Negative g values become positive violations; satisfied ones are 0.
        g = np.where(g < 0, -g, 0)
        f = np.zeros(self.n_objectives)
        f[0] = x1 + (120 * x2)
        f[1] = ((g[0] + g[1]) + g[2]) + g[3]
        return f
def save_checkpoint(state, checkpoint, is_best=False, name=None):
    """Save ``state`` into the ``checkpoint`` directory.

    Non-best checkpoints are written to ``last.pth.tar``; best checkpoints
    go to ``name`` when given, otherwise ``best.pth.tar``. The directory is
    created if missing.
    """
    if not os.path.exists(checkpoint):
        print('Checkpoint Directory does not exist! Making directory {}'.format(checkpoint))
        # makedirs (not mkdir) so nested checkpoint paths work too.
        os.makedirs(checkpoint, exist_ok=True)
    if is_best:
        filename = name if name is not None else 'best.pth.tar'
    else:
        filename = 'last.pth.tar'
    torch.save(state, os.path.join(checkpoint, filename))
class RangeSampler(Sampler):
    """Sampler yielding the contiguous indices [start_ind, end_ind)."""

    def __init__(self, start_ind, end_ind):
        self.start_ind = start_ind
        self.end_ind = end_ind

    def __iter__(self):
        return iter(torch.arange(self.start_ind, self.end_ind).tolist())

    def __len__(self):
        return self.end_ind - self.start_ind
class KerasModel(Feedable):
    """Interface mixin for Keras-style models exposing feed placeholders."""

    def placeholders(self):
        """Return this model's placeholders; overridden by subclasses."""
        pass

    @classmethod
    def placeholders_union(cls, models):
        """Collect the placeholders of every model in ``models``.

        @classmethod added: the bare ``def placeholders_union(cls, models)``
        broke class-level calls (``models`` bound to ``cls``).
        NOTE(review): extends with ``model.placeholders`` (no call), so
        subclasses are expected to expose ``placeholders`` as a property or
        attribute -- confirm.
        """
        phs = []
        for model in models:
            phs.extend(model.placeholders)
        return phs

    def output_tensors(self):
        """Return this model's output tensors; overridden by subclasses."""
        pass
class Array(object):
    """Pairs an array built by the C test extension with a numpy-built twin.

    ``self.arr`` is produced by ``wrap.call`` and ``self.pyarr`` is built
    with plain numpy from the same object; the constructor asserts that
    flags, attributes and contents agree for the given type/dims/intent.
    """

    def __init__(self, typ, dims, intent, obj):
        self.type = typ
        self.dims = dims
        self.intent = intent
        # Deep copy so tests can later detect whether obj itself was mutated.
        self.obj_copy = copy.deepcopy(obj)
        self.obj = obj
        # Array built through the extension under test.
        self.arr = wrap.call(typ.type_num, dims, intent.flags, obj)
        assert_(isinstance(self.arr, ndarray), repr(type(self.arr)))
        self.arr_attr = wrap.array_attrs(self.arr)
        if (len(dims) > 1):
            # Multi-dimensional arrays must be contiguous in the layout the
            # intent requested (C vs Fortran), both per numpy flags and per
            # the extension's flag word (attribute index 6).
            if self.intent.is_intent('c'):
                assert_((intent.flags & wrap.F2PY_INTENT_C))
                assert_((not self.arr.flags['FORTRAN']), repr((self.arr.flags, getattr(obj, 'flags', None))))
                assert_(self.arr.flags['CONTIGUOUS'])
                assert_((not (self.arr_attr[6] & wrap.FORTRAN)))
            else:
                assert_((not (intent.flags & wrap.F2PY_INTENT_C)))
                assert_(self.arr.flags['FORTRAN'])
                assert_((not self.arr.flags['CONTIGUOUS']))
                assert_((self.arr_attr[6] & wrap.FORTRAN))
        if (obj is None):
            # No source object to compare against.
            self.pyarr = None
            self.pyarr_attr = None
            return
        # Reference array built with plain numpy.
        if intent.is_intent('cache'):
            assert_(isinstance(obj, ndarray), repr(type(obj)))
            self.pyarr = array(obj).reshape(*dims).copy()
        else:
            self.pyarr = array(array(obj, dtype=typ.dtypechar).reshape(*dims), order=((self.intent.is_intent('c') and 'C') or 'F'))
            assert_((self.pyarr.dtype == typ), repr((self.pyarr.dtype, typ)))
        assert_(self.pyarr.flags['OWNDATA'], (obj, intent))
        self.pyarr_attr = wrap.array_attrs(self.pyarr)
        if (len(dims) > 1):
            if self.intent.is_intent('c'):
                assert_((not self.pyarr.flags['FORTRAN']))
                assert_(self.pyarr.flags['CONTIGUOUS'])
                assert_((not (self.pyarr_attr[6] & wrap.FORTRAN)))
            else:
                assert_(self.pyarr.flags['FORTRAN'])
                assert_((not self.pyarr.flags['CONTIGUOUS']))
                assert_((self.pyarr_attr[6] & wrap.FORTRAN))
        # Compare attribute words between the two arrays -- presumably
        # [1]=ndim, [2]=shape, [3]=strides, [5]=descr, [6]=flags; confirm
        # against the wrap extension.
        assert_((self.arr_attr[1] == self.pyarr_attr[1]))
        assert_((self.arr_attr[2] == self.pyarr_attr[2]))
        if (self.arr_attr[1] <= 1):
            assert_((self.arr_attr[3] == self.pyarr_attr[3]), repr((self.arr_attr[3], self.pyarr_attr[3], self.arr.tobytes(), self.pyarr.tobytes())))
        assert_((self.arr_attr[5][(- 2):] == self.pyarr_attr[5][(- 2):]), repr((self.arr_attr[5], self.pyarr_attr[5])))
        assert_((self.arr_attr[6] == self.pyarr_attr[6]), repr((self.arr_attr[6], self.pyarr_attr[6], flags2names(((0 * self.arr_attr[6]) - self.pyarr_attr[6])), flags2names(self.arr_attr[6]), intent)))
        if intent.is_intent('cache'):
            # Cache intent only guarantees room for at least one element.
            assert_((self.arr_attr[5][3] >= self.type.elsize), repr((self.arr_attr[5][3], self.type.elsize)))
        else:
            assert_((self.arr_attr[5][3] == self.type.elsize), repr((self.arr_attr[5][3], self.type.elsize)))
            assert_(self.arr_equal(self.pyarr, self.arr))
        if isinstance(self.obj, ndarray):
            if (typ.elsize == Type(obj.dtype).elsize):
                # Same element size, no copy requested, <=1-D: the extension
                # should reuse the original buffer.
                if ((not intent.is_intent('copy')) and (self.arr_attr[1] <= 1)):
                    assert_(self.has_shared_memory())

    def arr_equal(self, arr1, arr2):
        """Elementwise equality of two arrays (shapes must match).

        NOTE(review): ``alltrue`` is a legacy numpy alias -- confirm the
        pinned numpy version still provides it.
        """
        if (arr1.shape != arr2.shape):
            return False
        s = (arr1 == arr2)
        return alltrue(s.flatten())

    def __str__(self):
        return str(self.arr)

    def has_shared_memory(self):
        """True when ``self.arr`` shares its data buffer with ``self.obj``."""
        if (self.obj is self.arr):
            return True
        if (not isinstance(self.obj, ndarray)):
            return False
        obj_attr = wrap.array_attrs(self.obj)
        return (obj_attr[0] == self.arr_attr[0])
def test_with_sensitive_markers(config):
    """Adding markers extends, not replaces, the default sensitive set."""
    extra = {'new_marker1', 'new_marker2'}
    updated = config.with_sensitive_markers(*extra)
    assert updated.sensitive_markers == DEFAULT_SENSITIVE_MARKERS.union(extra)
class Connection(Base):
    """Record of a lane-to-lane connection between two edges.

    NOTE(review): assumes ``Base`` applies dataclass ``field`` semantics;
    ``pas`` presumably maps to an XML ``pass`` attribute renamed to avoid
    the Python keyword -- confirm against the serializer.
    """

    src: str        # id of the source edge
    trg: str        # id of the target edge
    fromLane: int   # lane index on the source edge
    toLane: int     # lane index on the target edge
    pas: bool = field(default=False)
    keepClear: bool = field(default=True)
    contPos: float = field(default=(- 1))    # -1 reads as "unset"
    visibility: float = field(default=4.5)
    speed: float = field(default=(- 1))      # -1 reads as "unset"
    shape: List[Tuple[(float, float)]] = field(default=None)
def test_run_test_case_chromosome_has_result():
    """An unchanged chromosome with a cached result is returned as-is."""
    result = MagicMock()
    executor = MagicMock()
    executor.execute.return_value = result
    computation = DummyTestCaseChromosomeComputation(executor)
    chromosome = tcc.TestCaseChromosome(MagicMock())
    chromosome.changed = False
    chromosome.set_last_execution_result(result)
    assert computation._run_test_case_chromosome(chromosome) == result
    assert chromosome.get_last_execution_result() == result
def main():
    """Entry point: parse options, build the loader, then train and
    checkpoint the requested stage ('GMM' or 'TOM') of the try-on pipeline.

    Raises NotImplementedError for any other stage name.
    """
    opt = get_opt()
    print(opt)
    print(('Start to train stage: %s, named: %s!' % (opt.stage, opt.name)))
    train_dataset = CPDataset(opt)
    train_loader = CPDataLoader(opt, train_dataset)
    # TensorBoard logs go to <tensorboard_dir>/<name>.
    if not os.path.exists(opt.tensorboard_dir):
        os.makedirs(opt.tensorboard_dir)
    board = SummaryWriter(log_dir=os.path.join(opt.tensorboard_dir, opt.name))
    if opt.stage == 'GMM':
        model = GMM(opt)
        # Resume from checkpoint only when a path is given and exists.
        if opt.checkpoint != '' and os.path.exists(opt.checkpoint):
            load_checkpoint(model, opt.checkpoint)
        train_gmm(opt, train_loader, model, board)
        save_checkpoint(model, os.path.join(opt.checkpoint_dir, opt.name, 'gmm_final.pth'))
    elif opt.stage == 'TOM':
        # 25 input / 4 output channels — presumably person representation +
        # warped cloth in, RGB + composition mask out; TODO confirm.
        model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)
        if opt.checkpoint != '' and os.path.exists(opt.checkpoint):
            load_checkpoint(model, opt.checkpoint)
        train_tom(opt, train_loader, model, board)
        save_checkpoint(model, os.path.join(opt.checkpoint_dir, opt.name, 'tom_final.pth'))
    else:
        raise NotImplementedError(('Model [%s] is not implemented' % opt.stage))
    # Fix: corrected 'nameed' typo in the completion message.
    print(('Finished training %s, named: %s!' % (opt.stage, opt.name)))
class Net_purchase(nn.Module):
    """Three-layer MLP (600 -> 300 -> 50 -> 2) for the purchase dataset."""

    def __init__(self):
        super(Net_purchase, self).__init__()
        self.fc1 = nn.Linear(600, 300)
        self.fc2 = nn.Linear(300, 50)
        self.fc3 = nn.Linear(50, 2)

    def forward(self, x):
        # NOTE(review): ReLU is also applied to the final (logit) layer,
        # which clamps negative logits to zero — confirm this is intended.
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return F.relu(self.fc3(hidden))
class ContrastiveLearningDataset():
    """Factory for datasets that yield *n_views* augmented views per image
    (SimCLR-style contrastive learning)."""

    def __init__(self, root_folder):
        # Directory where the raw datasets live / will be downloaded.
        self.root_folder = root_folder

    @staticmethod
    def get_simclr_pipeline_transform(size, s=1):
        """Return the SimCLR augmentation pipeline for square crops of *size*.

        *s* scales the colour-jitter strength.

        Fix: declared ``@staticmethod`` — the function takes no ``self``, yet
        ``get_dataset`` calls it as ``self.get_simclr_pipeline_transform(32)``,
        which previously bound the instance to ``size`` and 32 to ``s``.
        """
        color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
        data_transforms = transforms.Compose([
            transforms.RandomResizedCrop(size=size),
            transforms.RandomHorizontalFlip(),
            transforms.RandomApply([color_jitter], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            GaussianBlur(kernel_size=int(0.1 * size)),
            transforms.ToTensor(),
        ])
        return data_transforms

    def get_dataset(self, name, n_views):
        """Instantiate dataset *name* ('cifar10' or 'stl10') wrapped in a
        multi-view generator; raise InvalidDatasetSelection otherwise."""
        valid_datasets = {
            'cifar10': (lambda: datasets.CIFAR10(
                self.root_folder, train=True,
                transform=ContrastiveLearningViewGenerator(self.get_simclr_pipeline_transform(32), n_views),
                download=True)),
            'stl10': (lambda: datasets.STL10(
                self.root_folder, split='unlabeled',
                transform=ContrastiveLearningViewGenerator(self.get_simclr_pipeline_transform(96), n_views),
                download=True)),
        }
        try:
            dataset_fn = valid_datasets[name]
        except KeyError:
            raise InvalidDatasetSelection()
        else:
            return dataset_fn()
class Bottleneck_IBN(nn.Module):
    """ResNet bottleneck (1x1 -> 3x3 -> 1x1) whose first norm layer can be
    an Instance-Batch-Norm (IBN) block instead of plain BatchNorm."""

    # Channel expansion of the final 1x1 convolution.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, ibn=True):
        super(Bottleneck_IBN, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        # IBN only on the first normalization, per the IBN-Net design.
        self.bn1 = IBN(planes) if ibn else nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Project the identity branch when shapes differ.
        if self.downsample is not None:
            identity = self.downsample(x)
        y += identity
        return self.relu(y)
def convert_loras_to_safeloras(modelmap: Dict[str, Tuple[str, Set[str], int]] = None, outpath='./lora.safetensors'):
    """Convert the LoRA models described by *modelmap* into one safetensors
    file at *outpath* (thin wrapper without embeddings).

    Fix: the default was a shared mutable ``{}``; a fresh dict is now created
    per call, which is behaviorally identical for callers.
    """
    if modelmap is None:
        modelmap = {}
    convert_loras_to_safeloras_with_embeds(modelmap=modelmap, outpath=outpath)
class AFLBitmap():
    """Normalized AFL coverage bitmap: one byte per edge, values 0/1."""

    # Size in bytes of a raw AFL shared-memory bitmap.
    BITMAP_SIZE = 1048576

    def __init__(self, bitmap=None):
        """Wrap *bitmap*.

        An ndarray is assumed to be already normalized (values <= 1) and is
        copied verbatim; raw bytes are treated as an AFL "virgin" map where
        255 means "edge never hit" and are normalized to 0/1.
        """
        self.bitmap = np.array(bytearray())
        if bitmap is not None:
            if isinstance(bitmap, np.ndarray):
                assert np.sum(np.where(bitmap > 1, 1, 0)) == 0
                self.bitmap = np.array(bitmap, copy=True)
            else:
                self.bitmap = np.array(bytearray(bitmap), dtype='uint8')
                self.normalize_bitmap()

    @classmethod
    def empty(cls):
        """Return an all-zero bitmap of the default size.

        Fix: declared as a ``classmethod`` — the method took ``cls`` but had
        no decorator, so ``AFLBitmap.empty()`` raised TypeError.
        """
        b = bytearray([0]) * cls.BITMAP_SIZE
        bitmap = np.array(b)
        del b
        return cls(bitmap=bitmap)

    def normalize_bitmap(self):
        # Raw AFL semantics: 255 == untouched, anything else == covered.
        self.bitmap = np.array(np.where(self.bitmap != 255, 1, 0), dtype='uint8')

    def is_new(self, data):
        """True if *data* covers at least one edge this bitmap does not.

        NOTE(review): assumes ``data.delta`` accepts a raw ndarray and
        returns a number — confirm against the caller's type.
        """
        if len(self.bitmap) == 0:
            return True
        else:
            return data.delta(self.bitmap) > 0

    def initialize_bitmap_if_necessary(self, size):
        """Lazily allocate an all-zero bitmap of *size* bytes."""
        if len(self.bitmap) == 0 and size > 0:
            b = bytearray([0]) * size
            self.bitmap = np.array(b)
            del b

    def count(self):
        """Number of covered edges (the bitmap is 0/1 valued)."""
        return np.sum(self.bitmap)

    def delta(self, other):
        """Edges covered by self but not by *other*, as a new AFLBitmap."""
        if len(other.bitmap) > 0:
            self.initialize_bitmap_if_necessary(len(other.bitmap))
        elif len(self.bitmap) > 0:
            other.initialize_bitmap_if_necessary(len(self.bitmap))
        assert len(self.bitmap) == len(other.bitmap)
        # (a | b) - b leaves exactly the bits set in a but not in b.
        delta = (self.bitmap | other.bitmap) - other.bitmap
        return AFLBitmap(delta)

    def reset(self):
        """Drop all coverage information."""
        self.bitmap = np.array(bytearray())

    def delta_count(self, other):
        """Number of edges covered by self but not by *other*."""
        return np.sum(self.delta(other).bitmap)

    def update(self, other):
        """In-place union with *other*; no-op if *other* is empty."""
        if len(other.bitmap) == 0:
            return
        self.initialize_bitmap_if_necessary(len(other.bitmap))
        assert len(self.bitmap) == len(other.bitmap)
        self.bitmap = self.bitmap | other.bitmap

    def union(self, other):
        """Return the union as a new AFLBitmap.

        NOTE(review): returns None when *other* is empty, so ``a | b`` and
        ``a + b`` can yield None — kept for backward compatibility.
        """
        if len(other.bitmap) == 0:
            return
        self.initialize_bitmap_if_necessary(len(other.bitmap))
        assert len(self.bitmap) == len(other.bitmap)
        return AFLBitmap(self.bitmap | other.bitmap)

    def __or__(self, other):
        return self.union(other)

    def __add__(self, other):
        return self.union(other)

    def __repr__(self):
        return str(self.bitmap)
def removeSinglePoint(data):
    """Return the strokes in *data* whose first coordinate array contains
    more than one point (single-point strokes are dropped)."""
    return [stroke for stroke in data if len(stroke[0]) > 1]
class ImprovedBCELoss(nn.Module):
    """Class-balanced BCE-with-logits: the positive term is weighted by
    ``lambda_`` and the negative term by ``1 - lambda_``, each normalized
    per row by the count of positives / negatives."""

    def __init__(self, lambda_):
        super(ImprovedBCELoss, self).__init__()
        # Trade-off between the positive and negative terms.
        self.L = lambda_

    def forward(self, s, im):
        dtype = torch.float
        targets = im.type(dtype)
        logits = s.type(dtype)
        pos_count = torch.sum(targets, dim=1, keepdim=True, dtype=dtype)
        neg_count = torch.sum(1 - targets, dim=1, keepdim=True, dtype=dtype)
        w_pos = (self.L / pos_count) * targets
        w_neg = ((1 - self.L) / neg_count) * (1 - targets)
        # Rows without positives (or negatives) divide by zero and produce
        # NaN weights; zero them so those rows contribute nothing.
        w_pos[torch.isnan(w_pos)] = 0
        w_neg[torch.isnan(w_neg)] = 0
        pos_loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, targets, weight=w_pos, reduction='sum')
        neg_loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, targets, weight=w_neg, reduction='sum')
        return pos_loss + neg_loss
# NOTE(review): the two lines below look like truncated decorators —
# presumably ``@pytest.mark.torch`` and ``@pytest.mark.parametrize``;
# restore the ``@pytest.mark`` prefixes before running.
.torch
.parametrize('query_ids, scores, unseen_items', [(torch.tensor([0], dtype=torch.long), torch.tensor([0, 1, 2, 3, 4], dtype=torch.float), torch.tensor([False, False, False, True, True], dtype=torch.bool)), (torch.tensor([1], dtype=torch.long), torch.tensor([0, 1, 2, 3, 4], dtype=torch.float), torch.tensor([False, False, False, False, True], dtype=torch.bool)), (torch.tensor([2, 3], dtype=torch.long), torch.tensor([[0, 1, 2, 3, 4], [2, 4, 1, 3, 3]], dtype=torch.float), torch.tensor([[False, False, False, False, True], [True, False, False, False, True]], dtype=torch.bool))])
def test_remove_seen_items_on_predict(item_user_sequential_dataset, query_ids, scores, unseen_items):
    """Scores of items a user has already seen must be masked to -inf."""
    postprocessor = RemoveSeenItems(item_user_sequential_dataset)
    (_, scores_pred) = postprocessor.on_prediction(query_ids=query_ids, scores=scores)
    # Entries strictly above -inf are exactly the ones that survived masking.
    scores_pred_unseen = (scores_pred.flatten() > (- np.inf))
    assert all((scores_pred_unseen == unseen_items.flatten()))
class TestLSTMED(unittest.TestCase):
    """End-to-end tests for the LSTM encoder-decoder anomaly detector:
    trains a small model in the constructor, then checks scoring and
    save/load round-tripping."""

    def __init__(self, *args, **kwargs):
        # NOTE: training happens per-instantiation (unittest creates one
        # instance per test method), so every test retrains the model.
        super().__init__(*args, **kwargs)
        set_random_seeds()
        self.model = LSTMED(config=LSTMED.config_class(num_epochs=5))
        # NOTE(review): the MSL dataset class is pointed at the 'smap'
        # directory — confirm this mismatch is intentional.
        self.dataset = MSL(rootdir=join(rootdir, 'data', 'smap'))
        (df, metadata) = self.dataset[0]
        (self.train_df, self.test_df, self.test_labels) = get_train_test_splits(df, metadata, 1000)
        logger.info('Training model...\n')
        train_ts = TimeSeries.from_pd(self.train_df)
        self.model.train(train_ts)

    def test_score(self):
        """Smoke test: anomaly labels can be computed; log score statistics."""
        print(('-' * 80))
        logger.info((('test_score\n' + ('-' * 80)) + '\n'))
        test_ts = TimeSeries.from_pd(self.test_df)
        score_ts = self.model.get_anomaly_label(test_ts)
        scores = score_ts.to_pd().values.flatten()
        (min_score, max_score, sum_score) = (min(scores), max(scores), sum(scores))
        logger.info(f'scores look like: {scores[:10]}')
        logger.info(f'min score = {min_score}')
        logger.info(f'max score = {max_score}')
        logger.info(f'sum score = {sum_score}')

    def test_save_load(self):
        """A saved-then-loaded model must reproduce scores and labels exactly."""
        print(('-' * 80))
        logger.info((('test_save_load\n' + ('-' * 80)) + '\n'))
        self.model.save(dirname=join(rootdir, 'tmp', 'lstmed'))
        loaded_model = LSTMED.load(dirname=join(rootdir, 'tmp', 'lstmed'))
        test_ts = TimeSeries.from_pd(self.test_df)
        scores = self.model.get_anomaly_score(test_ts)
        loaded_model_scores = loaded_model.get_anomaly_score(test_ts)
        self.assertSequenceEqual(list(scores), list(loaded_model_scores))
        alarms = self.model.get_anomaly_label(test_ts)
        loaded_model_alarms = loaded_model.get_anomaly_label(test_ts)
        self.assertSequenceEqual(list(alarms), list(loaded_model_alarms))
def reduce_zero(Q, coeffs, offset, exact_form=None):
a = coeffs[int(offset)]
if (a[2] == 0):
return exact_form
Qa = Q[1]
a[0] = (a[0] - ((a[2] * Qa) / 3))
coeffs[int(offset)] = a
if (exact_form is not None):
y = exact_form.parent()(exact_form.parent().base_ring().gen(0))
exact_form += (Q.base_ring()((a[2] / 3)) * y)
a[2] = 0
coeffs[int(offset)] = a
return exact_form |
def fine_tune(data_loader, ifold, meta_m, weights_for_finetune, exp_string):
    """Fine-tune the pretrained MetaPred model on fold *ifold*.

    Returns ``(model, session)`` from the fitted fine-tuning model.
    Raises ValueError for an unsupported ``FLAGS.method`` — previously an
    unknown method fell through to ``m2.fit`` and died with an
    UnboundLocalError.
    """
    is_finetune = True
    print('finetunning MetaPred model ...')
    if FLAGS.method == 'cnn':
        m2 = finetune.CNN(data_loader, weights_for_finetune, freeze_opt=freeze_opt, is_finetune=is_finetune)
    elif FLAGS.method == 'rnn':
        m2 = finetune.RNN(data_loader, weights_for_finetune, freeze_opt=freeze_opt, is_finetune=is_finetune)
    else:
        raise ValueError('unsupported FLAGS.method for fine-tuning: %r' % (FLAGS.method,))
    print('model finetunning...')
    (sess, _, _) = m2.fit(data_loader.tt_sample[ifold], data_loader.tt_label[ifold], data_loader.tt_sample_val[ifold], data_loader.tt_label_val[ifold])
    return (m2, sess)
def FetchInt8BlobRealVal(name):
    """Fetch the Int8 blob *name* and dequantize it to float32 real values
    using its zero point and scale."""
    raw = C.fetch_blob(StringifyBlobName(name))
    assert isinstance(raw, tuple), 'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(StringifyBlobName(name))
    int8_blob = Int8Tensor(*raw)
    # real = (q - zero_point) * scale, computed in int32 to avoid overflow.
    shifted = int8_blob.data.astype(np.int32) - int(int8_blob.zero_point)
    return shifted.astype(np.float32) * int8_blob.scale
class FairseqModel(FairseqEncoderDecoderModel):
    """Deprecated alias kept for backward compatibility; constructing it
    emits a deprecation warning and behaves like its base class."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        message = 'FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead'
        utils.deprecation_warning(message, stacklevel=4)
def main():
    """Fine-tune a pretrained DocNLI RoBERTa model on FEVER-style NLI data.

    Parses CLI arguments, builds train/dev/test dataloaders, trains with
    AdamW, evaluates on dev after every epoch, and reports the test accuracy
    of the best-dev checkpoint.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--data_label', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--train_batch_size', default=16, type=int, help='Total batch size for training.')
    parser.add_argument('--eval_batch_size', default=64, type=int, help='Total batch size for eval.')
    parser.add_argument('--learning_rate', default=1e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    # NOTE(review): warmup_proportion is parsed but never used below —
    # no LR scheduler is created; confirm this is intended.
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    # NOTE(review): fp16/loss_scale are parsed but no AMP path exists below.
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
    parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    processors = {'rte': RteProcessor}
    output_modes = {'rte': 'classification'}
    # Device setup: single/multi-GPU vs distributed (NCCL) training.
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16))
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    # Effective per-step batch size after accumulation.
    args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(args.seed)
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    task_name = args.task_name.lower()
    if (task_name not in processors):
        raise ValueError(('Task not found: %s' % task_name))
    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    # Load FEVER data; the 'dev' split is shuffled and split into
    # dev (all but last 10k) and test (last 10k) examples.
    (train_examples, _) = get_FEVER_examples('train', hypo_only=False)
    (dev_and_test_examples, _) = get_FEVER_examples('dev', hypo_only=False)
    random.shuffle(dev_and_test_examples)
    dev_examples = dev_and_test_examples[:(- 10000)]
    test_examples = dev_and_test_examples[(- 10000):]
    label_list = ['entailment', 'not_entailment']
    num_labels = len(label_list)
    print('num_labels:', num_labels, 'training size:', len(train_examples), 'dev size:', len(dev_examples), ' test size:', len(test_examples))
    # NOTE: the None assignment is immediately overwritten below.
    num_train_optimization_steps = None
    num_train_optimization_steps = (int(((len(train_examples) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
    if (args.local_rank != (- 1)):
        num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
    # Model + pretrained DocNLI weights.
    model = RobertaForSequenceClassification(num_labels)
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)
    model.load_state_dict(torch.load('DocNLI.pretrained.RoBERTA.model.pt', map_location=device))
    model.to(device)
    # Standard BERT-style weight decay: skip biases and LayerNorm params.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    max_test_acc = 0.0
    max_dev_acc = 0.0
    if args.do_train:
        # Featurize all three splits with RoBERTa conventions
        # (sep_token_extra=True, CLS at start).
        train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token=tokenizer.cls_token, cls_token_segment_id=0, sep_token=tokenizer.sep_token, sep_token_extra=True, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0)
        dev_features = convert_examples_to_features(dev_examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token=tokenizer.cls_token, cls_token_segment_id=0, sep_token=tokenizer.sep_token, sep_token_extra=True, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0)
        dev_all_input_ids = torch.tensor([f.input_ids for f in dev_features], dtype=torch.long)
        dev_all_input_mask = torch.tensor([f.input_mask for f in dev_features], dtype=torch.long)
        dev_all_segment_ids = torch.tensor([f.segment_ids for f in dev_features], dtype=torch.long)
        dev_all_label_ids = torch.tensor([f.label_id for f in dev_features], dtype=torch.long)
        dev_data = TensorDataset(dev_all_input_ids, dev_all_input_mask, dev_all_segment_ids, dev_all_label_ids)
        dev_sampler = SequentialSampler(dev_data)
        dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.eval_batch_size)
        test_features = convert_examples_to_features(test_examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token=tokenizer.cls_token, cls_token_segment_id=0, sep_token=tokenizer.sep_token, sep_token_extra=True, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0)
        test_all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
        test_all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
        test_all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
        test_all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
        test_data = TensorDataset(test_all_input_ids, test_all_input_mask, test_all_segment_ids, test_all_label_ids)
        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
        logger.info('***** Running training *****')
        logger.info('  Num examples = %d', len(train_examples))
        logger.info('  Batch size = %d', args.train_batch_size)
        logger.info('  Num steps = %d', num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        iter_co = 0
        final_test_performance = 0.0
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            (nb_tr_examples, nb_tr_steps) = (0, 0)
            for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')):
                model.train()
                batch = tuple((t.to(device) for t in batch))
                (input_ids, input_mask, segment_ids, label_ids) = batch
                logits = model(input_ids, input_mask)
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view((- 1), num_labels), label_ids.view((- 1)))
                if (n_gpu > 1):
                    # Average losses gathered from DataParallel replicas.
                    loss = loss.mean()
                if (args.gradient_accumulation_steps > 1):
                    loss = (loss / args.gradient_accumulation_steps)
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                # NOTE(review): optimizer.step() runs every batch even when
                # gradient_accumulation_steps > 1 — confirm intended.
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                iter_co += 1
            '\n start evaluate on dev set after this epoch\n '
            model.eval()
            dev_acc = evaluation(dev_dataloader, device, model)
            # Only evaluate on test when dev accuracy improves.
            if (dev_acc > max_dev_acc):
                max_dev_acc = dev_acc
                print('\ndev acc:', dev_acc, ' max_dev_acc:', max_dev_acc, '\n')
                final_test_performance = evaluation(test_dataloader, device, model)
                print('\ntest acc:', final_test_performance, '\n')
            else:
                print('\ndev acc:', dev_acc, ' max_dev_acc:', max_dev_acc, '\n')
        print('final_test_performance:', final_test_performance)
class TestModule(nn.Module):
    """Tiny module used in tests: it registers one (deliberately unused)
    linear layer and simply shifts its input by a constant."""

    def __init__(self):
        super(TestModule, self).__init__()
        # Present only so the module owns registered parameters.
        self.fc1 = nn.Linear(5, 3)

    def forward(self, x):
        shifted = x + 2
        return shifted
class TFCamembertForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder: stands in for the real model class and raises
    an informative error when the TensorFlow backend is not installed."""
    # Backends the real implementation requires.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Raises immediately unless the 'tf' backend is available.
        requires_backends(self, ['tf'])
def video2segments(infos):
    """Split one video into chunks of at most ``dur_limit`` seconds.

    *infos* is ``(index, uid, dur)``; the source is ``video_dir/<uid>.mp4``
    and segments are written to ``output_dir/<uid>/<k>.mp4``.  Videos no
    longer than ``dur_limit`` are copied verbatim as ``0.mp4``.
    """
    (index, uid, dur) = (infos[0], infos[1], infos[2])
    input_path = os.path.join(video_dir, (uid + '.mp4'))
    output_uid_dir = os.path.join(output_dir, uid)
    if not os.path.exists(output_uid_dir):
        os.makedirs(output_uid_dir)
    assert os.path.exists(input_path)
    # Probe fps and frame count, then release the handle immediately —
    # fix: the capture was previously never released (leaked OS handles
    # when processing many videos).
    cap = cv2.VideoCapture(input_path)
    try:
        rate = cap.get(5)       # CAP_PROP_FPS
        frame_num = cap.get(7)  # CAP_PROP_FRAME_COUNT
    finally:
        cap.release()
    duration = frame_num / rate
    if duration <= dur_limit:
        shutil.copyfile(input_path, os.path.join(output_uid_dir, '0.mp4'))
        return
    num_seg = duration // dur_limit
    seg_start = 0
    seg_end = dur_limit
    num_finished = 0
    # Produces num_seg + 1 segments; the last one is the (shorter) tail.
    while num_finished <= num_seg:
        output_mp4_path = os.path.join(output_uid_dir, (str(num_finished) + '.mp4'))
        cmd = 'ffmpeg -y -i {} -ss {} -to {} -async 1 {}'.format(input_path, seg_start, seg_end, output_mp4_path)
        subprocess.call(cmd, shell=True)
        seg_start = seg_end
        seg_end = min(seg_start + dur_limit, duration)
        num_finished += 1
    return
def get_equal_array_size(xs, ys, ints):
    """Pad every array in *xs*, *ys* and *ints* (in place in the lists, by
    repeating its last value) up to the longest length found in *xs*.

    Returns ``(xs, ys, ints, max_size)``.
    NOTE(review): the target length comes from *xs* only — arrays in *ys*
    or *ints* longer than that would make the padding length negative.
    """
    max_size = max(len(arr) for arr in xs)
    for idx in range(len(xs)):
        for seq in (xs, ys, ints):
            pad = seq[idx][(- 1)] * np.ones([max_size - len(seq[idx])])
            seq[idx] = np.append(seq[idx], pad)
    return (xs, ys, ints, max_size)
# NOTE(review): the line below looks like a truncated decorator — presumably
# a registration call such as ``@analyzer.register(OperatorDef)``; restore
# the decorator prefix before running.
(OperatorDef)
def analyze_op(analyzer, op):
    """Register an operator's blob usage: inputs must already exist,
    outputs are (re)defined by this op."""
    for x in op.input:
        analyzer.need_blob(x)
    for x in op.output:
        analyzer.define_blob(x)
class FidelityKernel(KernelMatrixBase):
    """Quantum fidelity kernel built from an encoding circuit.

    Chooses a statevector-based or sampler-based Qiskit fidelity kernel
    depending on the executor's backend, supports trainable circuit
    parameters, and offers optional depolarizing-noise mitigation
    ('msplit' / 'mmean') and matrix regularization.
    """

    def __init__(self, encoding_circuit: EncodingCircuitBase, executor: Executor, evaluate_duplicates: str='off_diagonal', mit_depol_noise: Union[(str, None)]=None, initial_parameters: Union[(np.ndarray, None)]=None, parameter_seed: Union[(int, None)]=0, regularization: Union[(str, None)]=None) -> None:
        """Build the underlying Qiskit fidelity kernel for the circuit."""
        super().__init__(encoding_circuit, executor, initial_parameters, parameter_seed, regularization)
        self._quantum_kernel = None
        self._evaluate_duplicates = evaluate_duplicates
        self._mit_depol_noise = mit_depol_noise
        self._feature_vector = ParameterVector('x', self.num_features)
        if (self.num_parameters > 0):
            # NOTE(review): the ParameterVector name is an empty string —
            # possibly a lost 'p'; confirm against upstream.
            self._parameter_vector = ParameterVector('', self.num_parameters)
        else:
            self._parameter_vector = None
        self._enc_circ = self._encoding_circuit.get_circuit(self._feature_vector, self._parameter_vector)
        # Statevector backends use the exact (trainable) statevector kernel;
        # everything else goes through the sampler-based ComputeUncompute path.
        if ('statevector_simulator' in str(self._executor._backend)):
            if (self._parameter_vector is None):
                self._quantum_kernel = FidelityStatevectorKernel(feature_map=self._enc_circ)
            else:
                self._quantum_kernel = TrainableFidelityStatevectorKernel(feature_map=self._enc_circ, training_parameters=self._parameter_vector)
        else:
            fidelity = ComputeUncompute(sampler=self._executor.get_sampler())
            if (self._parameter_vector is None):
                self._quantum_kernel = FidelityQuantumKernel(feature_map=self._enc_circ, fidelity=fidelity, evaluate_duplicates=self._evaluate_duplicates)
            else:
                self._quantum_kernel = TrainableFidelityQuantumKernel(feature_map=self._enc_circ, fidelity=fidelity, training_parameters=self._parameter_vector, evaluate_duplicates=self._evaluate_duplicates)

    def get_params(self, deep: bool=True) -> dict:
        """Return hyperparameters; with ``deep`` also those of the circuit."""
        params = super().get_params(deep=False)
        params['evaluate_duplicates'] = self._evaluate_duplicates
        params['mit_depol_noise'] = self._mit_depol_noise
        params['regularization'] = self._regularization
        params['encoding_circuit'] = self._encoding_circuit
        if deep:
            params.update(self._encoding_circuit.get_params())
        return params

    def set_params(self, **params):
        """Set hyperparameters, forwarding circuit-level keys to the
        encoding circuit and rebuilding the kernel via ``__init__``."""
        # Saved so trained parameters survive the re-__init__ below when the
        # parameter count is unchanged.
        num_parameters_backup = self.num_parameters
        parameters_backup = self._parameters
        valid_params = self.get_params()
        for key in params.keys():
            if (key not in valid_params):
                raise ValueError(f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.')
        if ('encoding_circuit' in params):
            self._encoding_circuit = params['encoding_circuit']
            params.pop('encoding_circuit')
        # Split off the keys that belong to the encoding circuit.
        dict_encoding_circuit = {}
        for key in params.keys():
            if (key in self._encoding_circuit.get_params().keys()):
                dict_encoding_circuit[key] = params[key]
        for key in dict_encoding_circuit.keys():
            params.pop(key)
        self._encoding_circuit.set_params(**dict_encoding_circuit)
        if ('evaluate_duplicates' in params.keys()):
            self._evaluate_duplicates = params['evaluate_duplicates'].lower()
            params.pop('evaluate_duplicates')
        if ('mit_depol_noise' in params.keys()):
            self._mit_depol_noise = params['mit_depol_noise']
            params.pop('mit_depol_noise')
        if ('regularization' in params.keys()):
            self._regularization = params['regularization']
            params.pop('regularization')
        # Rebuild the quantum kernel from scratch with the new settings.
        self.__init__(self._encoding_circuit, self._executor, self._evaluate_duplicates, self._mit_depol_noise, None, self._parameter_seed, self._regularization)
        if (self.num_parameters == num_parameters_backup):
            self._parameters = parameters_backup
        if (len(params) > 0):
            raise ValueError('The following parameters could not be assigned:', params)

    def evaluate(self, x: np.ndarray, y: Union[(np.ndarray, None)]=None) -> np.ndarray:
        """Compute the kernel matrix K(x, y); y defaults to x (Gram matrix).

        Applies optional depolarizing-noise mitigation and regularization.
        """
        if (y is None):
            y = x
        kernel_matrix = np.zeros((x.shape[0], y.shape[0]))
        if (self._parameter_vector is not None):
            if (self._parameters is None):
                raise ValueError('Parameters have to been set with assign_parameters or as initial parameters!')
            self._quantum_kernel.assign_training_parameters(self._parameters)
        kernel_matrix = self._quantum_kernel.evaluate(x, y)
        if (self._mit_depol_noise is not None):
            print('WARNING: Advanced option. Do not use it within an squlearn.kernel.ml workflow')
            # Mitigation needs the diagonal K(x, x), hence square matrices only.
            if (not np.array_equal(x, y)):
                raise ValueError('Mitigating depolarizing noise works only for square matrices computed on real backend')
            elif (self._mit_depol_noise == 'msplit'):
                kernel_matrix = self._get_msplit_kernel(kernel_matrix)
            elif (self._mit_depol_noise == 'mmean'):
                kernel_matrix = self._get_mmean_kernel(kernel_matrix)
        if ((self._regularization is not None) and (kernel_matrix.shape[0] == kernel_matrix.shape[1])):
            kernel_matrix = self._regularize_matrix(kernel_matrix)
        return kernel_matrix

    def _get_msplit_kernel(self, kernel: np.ndarray) -> np.ndarray:
        """Mitigate depolarizing noise with per-sample survival probabilities."""
        msplit_kernel_matrix = np.zeros((kernel.shape[0], kernel.shape[1]))
        survival_prob = self._survival_probability(kernel)
        for i in range(kernel.shape[0]):
            for j in range(kernel.shape[1]):
                msplit_kernel_matrix[(i, j)] = ((kernel[(i, j)] - ((2 ** ((- 1.0) * self._num_qubits)) * (1 - (survival_prob[i] * survival_prob[j])))) / (survival_prob[i] * survival_prob[j]))
        return msplit_kernel_matrix

    def _get_mmean_kernel(self, kernel: np.ndarray) -> np.ndarray:
        """Mitigate depolarizing noise with one mean survival probability."""
        mmean_kernel_matrix = np.zeros((kernel.shape[0], kernel.shape[1]))
        survival_prob_mean = self._survival_probability_mean(kernel)
        mmean_kernel_matrix = ((kernel - ((2 ** ((- 1.0) * self._num_qubits)) * (1 - (survival_prob_mean ** 2)))) / (survival_prob_mean ** 2))
        return mmean_kernel_matrix

    def _survival_probability(self, kernel: np.ndarray) -> np.ndarray:
        """Per-sample survival probability derived from the kernel diagonal
        (the diagonal would be exactly 1 without depolarizing noise)."""
        kernel_diagonal = np.diag(kernel)
        surv_prob = np.sqrt(((kernel_diagonal - (2 ** ((- 1.0) * self._num_qubits))) / (1 - (2 ** ((- 1.0) * self._num_qubits)))))
        return surv_prob

    def _survival_probability_mean(self, kernel: np.ndarray) -> float:
        """Mean of the per-sample survival probabilities."""
        surv_prob = self._survival_probability(kernel)
        return np.mean(surv_prob)
def get_loss(pred, label):
    """Mean sparse-softmax cross-entropy over the batch; the value is also
    logged to TensorBoard and added to the 'losses' collection."""
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(per_example)
    tf.summary.scalar('classify loss', classify_loss)
    tf.add_to_collection('losses', classify_loss)
    return classify_loss
def convert_by_vocab(vocab, items, max_seq_length=None, blank_id=0, unk_id=1, uncased=True):
    """Map each item to its vocabulary id (falling back to *unk_id*) and
    count the unknowns.

    Returns ``(ids, unk_count)``.
    NOTE(review): ``max_seq_length`` and ``blank_id`` are accepted but unused
    here — presumably kept for signature compatibility; confirm.
    """
    ids = []
    unknown_count = 0
    for token in items:
        key = token.lower() if uncased else token
        if key in vocab:
            ids.append(vocab[key])
        else:
            ids.append(unk_id)
            unknown_count += 1
    return (ids, unknown_count)
def _accumulate(iterable, fn=(lambda x, y: (x + y))):
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
(yield total)
for element in it:
total = fn(total, element)
(yield total) |
class SqueezeExcitation(nn.Module):
    """Squeeze-and-Excitation block with hard-sigmoid gating, as used in
    MobileNetV3: global pool -> bottleneck 1x1 convs -> per-channel scale."""

    def __init__(self, input_channels: int, squeeze_factor: int = 4):
        super().__init__()
        # Bottleneck width, rounded to a multiple of 8.
        reduced_channels = _make_divisible(input_channels // squeeze_factor, 8)
        self.fc1 = nn.Conv2d(input_channels, reduced_channels, 1)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(reduced_channels, input_channels, 1)

    def _scale(self, input: Tensor, inplace: bool) -> Tensor:
        """Compute per-channel gating weights in [0, 1] for *input*."""
        pooled = F.adaptive_avg_pool2d(input, 1)
        squeezed = self.relu(self.fc1(pooled))
        expanded = self.fc2(squeezed)
        return F.hardsigmoid(expanded, inplace=inplace)

    def forward(self, input: Tensor) -> Tensor:
        return self._scale(input, True) * input
def translate(dx, dy, dz):
    """Return the 4x4 homogeneous translation matrix for offset (dx, dy, dz)."""
    rows = [
        [1.0, 0.0, 0.0, dx],
        [0.0, 1.0, 0.0, dy],
        [0.0, 0.0, 1.0, dz],
        [0.0, 0.0, 0.0, 1.0],
    ]
    return mat4(rows)
class TestBatchIterator(object):
    """Tests for ExampleBatchIterator."""
    # NOTE(review): ``iterator`` is consumed as a pytest fixture by
    # ``test_iterator`` below but carries no ``@pytest.fixture`` decorator
    # here — possibly lost in extraction; confirm against the repository.
    def iterator(self):
        return ExampleBatchIterator(8)

    def test_iterator(self, iterator):
        # An iterator of size 8 yields 0..7 in order.
        assert (list(iterator) == [0, 1, 2, 3, 4, 5, 6, 7])
class PrefetchLoader(object):
    """Wraps a DataLoader so the next batch is copied to the GPU on a side
    CUDA stream while the current batch is being consumed."""

    def __init__(self, loader):
        self.loader = loader
        # Dedicated stream for host-to-device copies.
        self.stream = torch.cuda.Stream()

    def __iter__(self):
        """Yield batches; ``(task, batch)`` tuples are yielded as tuples."""
        loader_it = iter(self.loader)
        self.preload(loader_it)
        batch = self.next(loader_it)
        while (batch is not None):
            is_tuple = isinstance(batch, tuple)
            if is_tuple:
                (task, batch) = batch
            if is_tuple:
                (yield (task, batch))
            else:
                (yield batch)
            batch = self.next(loader_it)

    def __len__(self):
        return len(self.loader)

    def preload(self, it):
        """Fetch the next CPU batch and start its async copy to the GPU."""
        try:
            self.batch = next(it)
        except StopIteration:
            # None marks exhaustion for next().
            self.batch = None
            return
        # Copy on the side stream so it overlaps with compute.
        with torch.cuda.stream(self.stream):
            self.batch = move_to_cuda(self.batch)

    def next(self, it):
        """Return the prefetched batch and kick off the next prefetch."""
        # Block the compute stream until the prefetch copy has finished.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.batch
        if (batch is not None):
            # Keep the tensors alive for the consuming stream (prevents the
            # caching allocator from reusing their memory too early).
            record_cuda_stream(batch)
        self.preload(it)
        return batch

    def __getattr__(self, name):
        # Delegate any unknown attribute to the wrapped loader.
        method = self.loader.__getattribute__(name)
        return method
class _Booleans(_Constraint):
    """Constraint satisfied by Python ``bool`` and NumPy ``bool_`` values."""

    def __init__(self):
        super().__init__()
        self._constraints = [_InstancesOf(bool), _InstancesOf(np.bool_)]

    def is_satisfied_by(self, val):
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, tail = self._constraints
        joined = ', '.join(str(c) for c in head)
        return f'{joined} or {tail}'
def exec_cmd(cmd):
if isinstance(cmd, str):
cmd = cmd.split(' ')
new_cmd = []
first = True
for e in cmd:
if first:
first = False
new_cmd.append(e)
elif (e != ''):
se = e.split(' ')
if (len(se) > 1):
for e2 in se:
if (e2 != ''):
new_cmd.append(e2)
else:
new_cmd.append(e)
cmd = new_cmd
null = open(os.devnull, 'wb')
try:
return subprocess.call(cmd, stdout=null, stderr=null)
except:
return 1
finally:
null.close() |
def detect_overflow(var, ctx):
    """Check tensor *var* for NaNs and infs, printing a message tagged with
    *ctx* for each kind found.

    Returns True if anything was detected, False otherwise.
    """
    detected = False
    if torch.isnan(var).any().item():
        detected = True
        print(f'{ctx} has nans')
    if torch.isinf(var).any().item():
        detected = True
        print(f'{ctx} has infs')
    # Dead `if 0:` debug blocks (magnitude buckets >=100/1000/10000 and
    # min/max/var/mean dumps) removed; reintroduce behind a real flag if
    # that diagnostics output is ever needed again.
    return detected
class PinocchioTestCase(unittest.TestCase):
    """TestCase base class offering an approximate-equality assertion."""

    def assertApprox(self, a, b, eps=1e-06):
        """Assert that *a* and *b* are approximately equal within *eps*."""
        message = ('\n%s\nis not approximately equal to\n%s\nwith precision %f' % (a, b, eps))
        return self.assertTrue(isapprox(a, b, eps), message)
def _get_experiment_progress(experiment) -> Union[(float, None)]:
    """Fraction of training rounds completed, or None when the experiment
    is not currently in progress."""
    if experiment.status != Status.IN_PROGRESS:
        return None
    aggregator = experiment.aggregator
    return aggregator.round_number / aggregator.rounds_to_train
def get_normalization_norm(func, mean_out, var_out, beta, gamma, constant0, constant1):
    """Build the ONNX node list computing a normalization:
    (x - mean) / (var + constant0) ** constant1, optionally scaled by
    *gamma* and/or shifted by *beta* depending on how many inputs the
    source function declares.

    Returns the list of ONNX nodes; the last node is rewired to write to
    ``func.output[0]``.
    """
    nl = []
    # x - mean
    sub_out = (fork_name(func.input[0]) + '_sub')
    n = onnx.helper.make_node('Sub', [func.input[0], mean_out], [sub_out])
    nl.append(n)
    # var + constant0 (presumably the epsilon term -- TODO confirm)
    add_out = (fork_name(func.output[0]) + '_add')
    n = onnx.helper.make_node('Add', [var_out, constant0], [add_out])
    nl.append(n)
    # (var + constant0) ** constant1 (presumably 0.5, i.e. sqrt -- TODO confirm)
    pow_out = (fork_name(func.input[0]) + '_pow')
    n = onnx.helper.make_node('Pow', [add_out, constant1], [pow_out])
    nl.append(n)
    # normalized = (x - mean) / std
    norm = (fork_name(func.output[0]) + '_div')
    n = onnx.helper.make_node('Div', [sub_out, pow_out], [norm])
    nl.append(n)
    if (len(func.input) == 3):
        # Both affine parameters present: norm * gamma + beta.
        mul_gamma_out = (fork_name(func.output[0]) + '_mul')
        n = onnx.helper.make_node('Mul', [norm, gamma], [mul_gamma_out])
        nl.append(n)
        add_beta_out = (fork_name(func.output[0]) + '_add')
        n = onnx.helper.make_node('Add', [mul_gamma_out, beta], [add_beta_out])
        nl.append(n)
    elif (len(func.input) == 2):
        # Only one affine parameter; the second input's *name* selects it.
        # NOTE(review): this compares the input name to the literals
        # 'gamma'/'beta' -- verify the producer guarantees those names.
        if (func.input[1] == 'gamma'):
            mul_gamma_out = (fork_name(func.output[0]) + '_mul')
            n = onnx.helper.make_node('Mul', [norm, gamma], [mul_gamma_out])
            nl.append(n)
        elif (func.input[1] == 'beta'):
            add_beta_out = (fork_name(func.output[0]) + '_add')
            n = onnx.helper.make_node('Add', [norm, beta], [add_beta_out])
            nl.append(n)
    # Whatever node came last produces the function's declared output.
    nl[(- 1)].output[0] = func.output[0]
    return nl |
def result2tag(result, turncate):
    """Convert per-word score vectors into predicted tag indices.

    For each sentence, only the first ``turncate[idx]`` words are kept
    (the rest is padding); each word's tag is the argmax of its score
    vector.

    Returns a numpy array of per-sentence tag-index lists.
    """
    def pick(scores):
        # Index of the highest score == predicted tag id.
        scores = scores.tolist()
        return scores.index(max(scores))

    sentences = [
        [pick(word) for word in sentence[:turncate[idx]]]
        for (idx, sentence) in enumerate(result)
    ]
    return np.array(sentences)
class Conv2dAWS(nn.Conv2d):
    """Conv2d with adaptive weight standardization.

    Each forward pass standardizes the convolution weight per output
    channel (zero mean, unit std) and then re-scales/shifts it with the
    learned-free buffers ``weight_gamma`` / ``weight_beta``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(Conv2dAWS, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        # Buffers (not parameters): per-output-channel affine applied to the
        # standardized weight; gamma starts at 1, beta at 0 (identity).
        self.register_buffer('weight_gamma', torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta', torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        """Return the standardized and re-affined convolution weight."""
        # Per-output-channel mean over (in_channels, kH, kW).
        weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        weight = (weight - weight_mean)
        # 1e-05 guards against division by zero for near-constant filters.
        std = torch.sqrt((weight.view(weight.size(0), (- 1)).var(dim=1) + 1e-05)).view((- 1), 1, 1, 1)
        weight = (weight / std)
        weight = ((self.weight_gamma * weight) + self.weight_beta)
        return weight

    def forward(self, x):
        weight = self._get_weight(self.weight)
        # NOTE(review): `conv2d_forward` exists only in some torch versions
        # (renamed `_conv_forward` later) -- confirm the pinned torch release.
        return super().conv2d_forward(x, weight) |
class TestSeq2SeqCollator(unittest.TestCase):
    """Unit tests for Seq2SeqCollater batching of speech frames + targets."""

    def test_collate(self):
        """Collate two samples and check padding, ordering and counts.

        Samples are expected to be reordered longest-first; shorter frame
        sequences and targets are padded with ``pad_idx``.
        """
        eos_idx = 1
        pad_idx = 0
        collater = Seq2SeqCollater(feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx)
        frames1 = np.array([[7, 8], [9, 10]])
        frames2 = np.array([[1, 2], [3, 4], [5, 6]])
        target1 = np.array([4, 2, 3, eos_idx])
        target2 = np.array([3, 2, eos_idx])
        sample1 = {'id': 0, 'data': [frames1, target1]}
        sample2 = {'id': 1, 'data': [frames2, target2]}
        batch = collater.collate([sample1, sample2])
        # sample2 is longer, so it sorts first.
        self.assertTensorEqual(batch['id'], torch.tensor([1, 0]))
        self.assertEqual(batch['ntokens'], 7)
        self.assertTensorEqual(batch['net_input']['src_tokens'], torch.tensor([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]))
        # Teacher-forcing input: eos moved to the front, rest shifted right.
        self.assertTensorEqual(batch['net_input']['prev_output_tokens'], torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]))
        self.assertTensorEqual(batch['net_input']['src_lengths'], torch.tensor([3, 2]))
        self.assertTensorEqual(batch['target'], torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]))
        self.assertEqual(batch['nsentences'], 2)

    def assertTensorEqual(self, t1, t2):
        """Assert two tensors have identical shape and elements."""
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertEqual(t1.ne(t2).long().sum(), 0) |
class EggInfoDistribution(BaseInstalledDistribution):
    """Distribution backed by legacy ``.egg`` / ``.egg-info`` metadata.

    Metadata is read from ``PKG-INFO`` (plus ``requires.txt`` and
    ``top_level.txt``) found either in an ``.egg-info`` directory, or
    inside an ``.egg`` directory or zip archive.
    """
    # Egg metadata cannot record whether installation was requested, so
    # assume it was.
    requested = True
    shared_locations = {}

    def __init__(self, path, env=None):
        # Shared by the cached and non-cached branches below.
        def set_name_and_version(s, n, v):
            s.name = n
            s.key = n.lower()
            s.version = v
        self.path = path
        self.dist_path = env
        if (env and env._cache_enabled and (path in env._cache_egg.path)):
            # Metadata for this path was parsed before: reuse it.
            metadata = env._cache_egg.path[path].metadata
            set_name_and_version(self, metadata.name, metadata.version)
        else:
            metadata = self._get_metadata(path)
            set_name_and_version(self, metadata.name, metadata.version)
            if (env and env._cache_enabled):
                env._cache_egg.add(self)
        super(EggInfoDistribution, self).__init__(metadata, path, env)

    def _get_metadata(self, path):
        """Parse PKG-INFO / requires.txt / top_level.txt from *path*.

        *path* must end with ``.egg`` (directory or zip) or ``.egg-info``
        (directory); raises DistlibException otherwise. Also sets
        ``self.modules`` from top_level.txt as a side effect.
        """
        requires = None

        def parse_requires_data(data):
            """Create a list of dependencies from a requires.txt contents."""
            reqs = []
            lines = data.splitlines()
            for line in lines:
                line = line.strip()
                # '[extra]' section header: everything after it is an
                # extra's requirements, which are not scanned.
                if line.startswith('['):
                    logger.warning('Unexpected line: quitting requirement scan: %r', line)
                    break
                r = parse_requirement(line)
                if (not r):
                    logger.warning('Not recognised as a requirement: %r', line)
                    continue
                if r.extras:
                    logger.warning('extra requirements in requires.txt are not supported')
                if (not r.constraints):
                    reqs.append(r.name)
                else:
                    # Render constraints as 'name (op version, ...)'.
                    cons = ', '.join((('%s%s' % c) for c in r.constraints))
                    reqs.append(('%s (%s)' % (r.name, cons)))
            return reqs

        def parse_requires_path(req_path):
            """Create a list of dependencies from a requires.txt file;
            missing files yield an empty list."""
            reqs = []
            try:
                with codecs.open(req_path, 'r', 'utf-8') as fp:
                    reqs = parse_requires_data(fp.read())
            except IOError:
                pass
            return reqs
        tl_path = tl_data = None
        if path.endswith('.egg'):
            if os.path.isdir(path):
                # Unpacked egg: metadata lives under EGG-INFO/.
                p = os.path.join(path, 'EGG-INFO')
                meta_path = os.path.join(p, 'PKG-INFO')
                metadata = Metadata(path=meta_path, scheme='legacy')
                req_path = os.path.join(p, 'requires.txt')
                tl_path = os.path.join(p, 'top_level.txt')
                requires = parse_requires_path(req_path)
            else:
                # Zipped egg: read the same files through zipimport.
                zipf = zipimport.zipimporter(path)
                fileobj = StringIO(zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
                metadata = Metadata(fileobj=fileobj, scheme='legacy')
                try:
                    data = zipf.get_data('EGG-INFO/requires.txt')
                    tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
                    requires = parse_requires_data(data.decode('utf-8'))
                except IOError:
                    requires = None
        elif path.endswith('.egg-info'):
            if os.path.isdir(path):
                req_path = os.path.join(path, 'requires.txt')
                requires = parse_requires_path(req_path)
                path = os.path.join(path, 'PKG-INFO')
                # NOTE(review): tl_path is joined onto the PKG-INFO *file*
                # path, yielding '<...>/PKG-INFO/top_level.txt' which cannot
                # exist, so self.modules stays empty for .egg-info dirs --
                # verify whether top_level.txt should be read from the
                # .egg-info directory instead.
                tl_path = os.path.join(path, 'top_level.txt')
            metadata = Metadata(path=path, scheme='legacy')
        else:
            raise DistlibException(('path must end with .egg-info or .egg, got %r' % path))
        if requires:
            metadata.add_requirements(requires)
        # tl_data is only pre-populated in the zipped-egg branch; read it
        # from disk for the directory-based layouts.
        if (tl_data is None):
            if ((tl_path is not None) and os.path.exists(tl_path)):
                with open(tl_path, 'rb') as f:
                    tl_data = f.read().decode('utf-8')
        if (not tl_data):
            tl_data = []
        else:
            tl_data = tl_data.splitlines()
        self.modules = tl_data
        return metadata

    def __repr__(self):
        return ('<EggInfoDistribution %r %s at %r>' % (self.name, self.version, self.path))

    def __str__(self):
        return ('%s %s' % (self.name, self.version))

    def check_installed_files(self):
        """Check that files listed in installed-files.txt still exist.

        Returns a list of (path, 'exists', True, False) tuples for any
        missing files (no hash/size verification is performed here).
        """
        mismatches = []
        record_path = os.path.join(self.path, 'installed-files.txt')
        if os.path.exists(record_path):
            for (path, _, _) in self.list_installed_files():
                if (path == record_path):
                    continue
                if (not os.path.exists(path)):
                    mismatches.append((path, 'exists', True, False))
        return mismatches

    def list_installed_files(self):
        """Iterate installed-files.txt, yielding (path, md5, size) tuples.

        Compiled files (.pyc/.pyo) and directories are skipped; the record
        file itself is appended last with (None, None) hash/size.
        """
        def _md5(path):
            f = open(path, 'rb')
            try:
                content = f.read()
            finally:
                f.close()
            return hashlib.md5(content).hexdigest()

        def _size(path):
            return os.stat(path).st_size
        record_path = os.path.join(self.path, 'installed-files.txt')
        result = []
        if os.path.exists(record_path):
            with codecs.open(record_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    # Entries are relative to the .egg-info directory.
                    p = os.path.normpath(os.path.join(self.path, line))
                    if (not os.path.exists(p)):
                        logger.warning('Non-existent file: %s', p)
                        if p.endswith(('.pyc', '.pyo')):
                            continue
                    if (not os.path.isdir(p)):
                        result.append((p, _md5(p), _size(p)))
            result.append((record_path, None, None))
        return result

    def list_distinfo_files(self, absolute=False):
        """Yield paths from installed-files.txt that live inside self.path.

        Lines before the './' marker are skipped; *absolute* selects
        between normalized absolute paths and the raw relative lines.
        """
        record_path = os.path.join(self.path, 'installed-files.txt')
        if os.path.exists(record_path):
            skip = True
            with codecs.open(record_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if (line == './'):
                        skip = False
                        continue
                    if (not skip):
                        p = os.path.normpath(os.path.join(self.path, line))
                        if p.startswith(self.path):
                            if absolute:
                                (yield p)
                            else:
                                (yield line)

    def __eq__(self, other):
        return (isinstance(other, EggInfoDistribution) and (self.path == other.path))
    # Defining __eq__ would otherwise disable hashing; keep identity hash.
    __hash__ = object.__hash__ |
class RandomDomainSampler(Sampler):
    """Sampler drawing each batch evenly from ``n_domain`` random domains.

    Items are grouped by their domain id (``items[2]``, e.g. camera id);
    each batch picks ``n_domain`` domains at random and samples
    ``batch_size // n_domain`` items without replacement from each.
    """

    def __init__(self, data_source, batch_size, n_domain):
        self.data_source = data_source
        # domain id -> list of dataset indices belonging to that domain.
        self.domain_dict = defaultdict(list)
        for (i, items) in enumerate(data_source):
            camid = items[2]
            self.domain_dict[camid].append(i)
        self.domains = list(self.domain_dict.keys())
        # Non-positive / None n_domain means "use every domain".
        if ((n_domain is None) or (n_domain <= 0)):
            n_domain = len(self.domains)
        assert ((batch_size % n_domain) == 0)
        self.n_img_per_domain = (batch_size // n_domain)
        self.batch_size = batch_size
        self.n_domain = n_domain
        # Materialize one epoch to learn its length (consumes RNG state).
        self.length = len(list(self.__iter__()))

    def __iter__(self):
        # Work on a copy so indices can be removed as they are consumed.
        domain_dict = copy.deepcopy(self.domain_dict)
        final_idxs = []
        stop_sampling = False
        while (not stop_sampling):
            selected_domains = random.sample(self.domains, self.n_domain)
            for domain in selected_domains:
                idxs = domain_dict[domain]
                selected_idxs = random.sample(idxs, self.n_img_per_domain)
                final_idxs.extend(selected_idxs)
                for idx in selected_idxs:
                    domain_dict[domain].remove(idx)
                # Stop once any domain can no longer fill its quota.
                remaining = len(domain_dict[domain])
                if (remaining < self.n_img_per_domain):
                    stop_sampling = True
        return iter(final_idxs)

    def __len__(self):
        return self.length |
class TestGeomspace(object):
    """Tests for numpy.geomspace (logarithmically spaced samples)."""

    def test_basic(self):
        """Default num, endpoint handling, and negative-range behavior."""
        y = geomspace(1, 1000000.0)
        assert_((len(y) == 50))
        y = geomspace(1, 1000000.0, num=100)
        assert_((y[(- 1)] == (10 ** 6)))
        y = geomspace(1, 1000000.0, endpoint=False)
        assert_((y[(- 1)] < (10 ** 6)))
        y = geomspace(1, 1000000.0, num=7)
        assert_array_equal(y, [1, 10, 100, 1000.0, 10000.0, 100000.0, 1000000.0])
        # Decreasing sequences and matching negative endpoints stay real.
        y = geomspace(8, 2, num=3)
        assert_allclose(y, [8, 4, 2])
        assert_array_equal(y.imag, 0)
        y = geomspace((- 1), (- 100), num=3)
        assert_array_equal(y, [(- 1), (- 10), (- 100)])
        assert_array_equal(y.imag, 0)
        y = geomspace((- 100), (- 1), num=3)
        assert_array_equal(y, [(- 100), (- 10), (- 1)])
        assert_array_equal(y.imag, 0)

    def test_complex(self):
        """Complex endpoints: interpolation follows the shorter arc."""
        y = geomspace(1j, 16j, num=5)
        assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
        assert_array_equal(y.real, 0)
        y = geomspace((- 4j), (- 324j), num=5)
        assert_allclose(y, [(- 4j), (- 12j), (- 36j), (- 108j), (- 324j)])
        assert_array_equal(y.real, 0)
        y = geomspace((1 + 1j), (1000 + 1000j), num=4)
        assert_allclose(y, [(1 + 1j), (10 + 10j), (100 + 100j), (1000 + 1000j)])
        y = geomspace(((- 1) + 1j), ((- 1000) + 1000j), num=4)
        assert_allclose(y, [((- 1) + 1j), ((- 10) + 10j), ((- 100) + 100j), ((- 1000) + 1000j)])
        # Logarithmic spiral from -1 to 1 passes through +1j.
        y = geomspace((- 1), 1, num=3, dtype=complex)
        assert_allclose(y, [(- 1), 1j, (+ 1)])
        y = geomspace((0 + 3j), ((- 3) + 0j), 3)
        assert_allclose(y, [(0 + 3j), (((- 3) / sqrt(2)) + (3j / sqrt(2))), ((- 3) + 0j)])
        y = geomspace((0 + 3j), (3 + 0j), 3)
        assert_allclose(y, [(0 + 3j), ((3 / sqrt(2)) + (3j / sqrt(2))), (3 + 0j)])
        y = geomspace(((- 3) + 0j), (0 - 3j), 3)
        assert_allclose(y, [((- 3) + 0j), (((- 3) / sqrt(2)) - (3j / sqrt(2))), (0 - 3j)])
        y = geomspace((0 + 3j), ((- 3) + 0j), 3)
        assert_allclose(y, [(0 + 3j), (((- 3) / sqrt(2)) + (3j / sqrt(2))), ((- 3) + 0j)])
        y = geomspace(((- 2) - 3j), (5 + 7j), 7)
        assert_allclose(y, [((- 2) - 3j), ((- 0.) - 4.j), (2. - 4.j), (4. - 3.j), (6. - 0.j), (6. + 3.j), (5 + 7j)])
        # Mixed real/imaginary endpoints with num=2 return just the endpoints.
        y = geomspace(3j, (- 5), 2)
        assert_allclose(y, [3j, (- 5)])
        y = geomspace((- 5), 3j, 2)
        assert_allclose(y, [(- 5), 3j])

    def test_dtype(self):
        """Explicit dtype arguments are honored in the output array."""
        y = geomspace(1, 1000000.0, dtype='float32')
        assert_equal(y.dtype, dtype('float32'))
        y = geomspace(1, 1000000.0, dtype='float64')
        assert_equal(y.dtype, dtype('float64'))
        y = geomspace(1, 1000000.0, dtype='int32')
        assert_equal(y.dtype, dtype('int32'))
        # Native types should also be respected.
        y = geomspace(1, 1000000.0, dtype=float)
        assert_equal(y.dtype, dtype('float_'))
        y = geomspace(1, 1000000.0, dtype=complex)
        assert_equal(y.dtype, dtype('complex'))

    def test_start_stop_array_scalar(self):
        """Scalar elements taken from typed arrays match float results."""
        lim1 = array([120, 100], dtype='int8')
        lim2 = array([(- 120), (- 100)], dtype='int8')
        lim3 = array([1200, 1000], dtype='uint16')
        t1 = geomspace(lim1[0], lim1[1], 5)
        t2 = geomspace(lim2[0], lim2[1], 5)
        t3 = geomspace(lim3[0], lim3[1], 5)
        t4 = geomspace(120.0, 100.0, 5)
        t5 = geomspace((- 120.0), (- 100.0), 5)
        t6 = geomspace(1200.0, 1000.0, 5)
        assert_allclose(t1, t4, rtol=0.01)
        assert_allclose(t2, t5, rtol=0.01)
        assert_allclose(t3, t6, rtol=1e-05)

    def test_start_stop_array(self):
        """Array start/stop broadcast along a new axis."""
        start = array([1.0, 32.0, 1j, (- 4j), (1 + 1j), (- 1)])
        stop = array([10000.0, 2.0, 16j, (- 324j), (10000 + 10000j), 1])
        t1 = geomspace(start, stop, 5)
        t2 = stack([geomspace(_start, _stop, 5) for (_start, _stop) in zip(start, stop)], axis=1)
        assert_equal(t1, t2)
        t3 = geomspace(start, stop[0], 5)
        t4 = stack([geomspace(_start, stop[0], 5) for _start in start], axis=1)
        assert_equal(t3, t4)
        t5 = geomspace(start, stop, 5, axis=(- 1))
        assert_equal(t5, t2.T)

    def test_physical_quantities(self):
        """Objects implementing float semantics are accepted."""
        a = PhysicalQuantity(1.0)
        b = PhysicalQuantity(5.0)
        assert_equal(geomspace(a, b), geomspace(1.0, 5.0))

    def test_subclass(self):
        """ndarray subclasses are preserved in the result type."""
        a = array(1).view(PhysicalQuantity2)
        b = array(7).view(PhysicalQuantity2)
        gs = geomspace(a, b)
        assert (type(gs) is PhysicalQuantity2)
        assert_equal(gs, geomspace(1.0, 7.0))
        gs = geomspace(a, b, 1)
        assert (type(gs) is PhysicalQuantity2)
        assert_equal(gs, geomspace(1.0, 7.0, 1))

    def test_bounds(self):
        """Zero endpoints are invalid (log of 0) and must raise."""
        assert_raises(ValueError, geomspace, 0, 10)
        assert_raises(ValueError, geomspace, 10, 0)
        assert_raises(ValueError, geomspace, 0, 0) |
def debug_print_file(fn):
    """Print a debugging dump of *fn*.

    Prints '<does not exist>' for missing paths, a sorted directory
    listing for directories, and the file contents otherwise.

    Parameters
    ----------
    fn : str
        Path to inspect.
    """
    print(('%s:' % fn))
    if (not os.path.exists(fn)):
        print('<does not exist>')
        return
    if os.path.isdir(fn):
        print('<dir:>')
        pprint(sorted(os.listdir(fn)))
        return
    # `with` closes the handle; the original `open(fn).read()` leaked it
    # (closure was left to the garbage collector).
    with open(fn) as f:
        print(f.read())
def folder2lmdb(dpath, name='train', write_frequency=5000, map_size=(1099511627776 * 2)):
    """Serialize an ImageFolder dataset into an LMDB database.

    Parameters
    ----------
    dpath : str
        Root directory containing the image sub-folder *name*.
    name : str
        Sub-folder to load; also used as the LMDB basename ('<name>.lmdb').
    write_frequency : int
        Commit the write transaction every this many samples.
    map_size : int
        Maximum LMDB memory-map size in bytes. Defaults to 2 TiB.
        NOTE(review): the original literal was corrupted (`( * 2)`); the
        conventional value 1 TiB * 2 is assumed here -- confirm against
        the original source.
    """
    directory = osp.expanduser(osp.join(dpath, name))
    print(('Loading dataset from %s' % directory))
    dataset = ImageFolder(directory, loader=raw_reader)
    data_loader = DataLoader(dataset, num_workers=4, collate_fn=(lambda x: x))
    lmdb_path = osp.join(dpath, ('%s.lmdb' % name))
    isdir = os.path.isdir(lmdb_path)
    print(('Generate LMDB to %s' % lmdb_path))
    db = lmdb.open(lmdb_path, subdir=isdir, map_size=map_size, readonly=False, meminit=False, map_async=True)
    txn = db.begin(write=True)
    for (idx, data) in enumerate(data_loader):
        (image, label) = data[0]
        txn.put(u'{}'.format(idx).encode('ascii'), dumps_pyarrow((image, label)))
        # Commit periodically so the write transaction stays bounded.
        if ((idx % write_frequency) == 0):
            print(('[%d/%d]' % (idx, len(data_loader))))
            txn.commit()
            txn = db.begin(write=True)
    txn.commit()
    # Store the key list and length for fast random access at read time.
    keys = [u'{}'.format(k).encode('ascii') for k in range((idx + 1))]
    with db.begin(write=True) as txn:
        txn.put(b'__keys__', dumps_pyarrow(keys))
        txn.put(b'__len__', dumps_pyarrow(len(keys)))
    print('Flushing database ...')
    db.sync()
    db.close()
def main():
    """Evaluate a (PLUS) protein model on homology test datasets.

    Parses CLI args, builds data/model/run configs, loads pretrained
    models, runs classification (and optionally LM) evaluation over every
    '*pairs*' dataset in the data config, and logs metrics.
    """
    set_seeds(2020)
    args = vars(parser.parse_args())
    alphabet = Protein()
    # Assemble configuration objects; an optional language-model config
    # enlarges the main model's lm input dimension.
    cfgs = []
    data_cfg = config.DataConfig(args['data_config'])
    cfgs.append(data_cfg)
    if (args['lm_model_config'] is None):
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), num_classes=5)
        cfgs += [model_cfg]
    else:
        lm_model_cfg = config.ModelConfig(args['lm_model_config'], idx='lm_model_config', input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), lm_dim=((lm_model_cfg.num_layers * lm_model_cfg.hidden_dim) * 2), num_classes=5)
        cfgs += [model_cfg, lm_model_cfg]
    run_cfg = config.RunConfig(args['run_config'], eval=True, sanity_check=args['sanity_check'])
    cfgs.append(run_cfg)
    (output, save_prefix) = set_output(args, 'eval_homology_log', test=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = (args['device'] if (args['device'] is not None) else '')
    (device, data_parallel) = (torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), (torch.cuda.device_count() > 1))
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == 'RNN')
    flag_lm_model = (args['pretrained_lm_model'] is not None)
    flag_cm_model = (args['pretrained_cm_model'] is not None)
    # Load every test dataset whose config key contains 'pairs'.
    (idxs_test, datasets_test, iterators_test) = ([key for key in data_cfg.path.keys() if ('pairs' in key)], [], [])
    start = Print(' '.join((['start loading test datasets'] + idxs_test)), output)
    collate_fn = (dataset.collate_paired_sequences if flag_rnn else None)
    for idx_test in idxs_test:
        dataset_test = homology.load_homology_pairs(data_cfg, idx_test, alphabet, flag_cm_model, args['sanity_check'])
        dataset_test = dataset.PairedHomology_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
        iterator_test = torch.utils.data.DataLoader(dataset_test, run_cfg.batch_size_eval, collate_fn=collate_fn)
        datasets_test.append(dataset_test)
        iterators_test.append(iterator_test)
        end = Print(' '.join(['loaded', str(len(dataset_test)), 'sequence pairs']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    # Instantiate model(s): transformer vs RNN, plus optional LM / contact
    # models, each entry: [model, suffix, load-flag, save-flag, train-flag].
    start = Print('start initializing a model', output)
    models_list = []
    if (not flag_rnn):
        model = plus_tfm.PLUS_TFM(model_cfg)
    elif (not flag_lm_model):
        model = plus_rnn.PLUS_RNN(model_cfg)
    else:
        model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, '', True, False, False])
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, 'lm', True, False, False])
    if flag_cm_model:
        cm_model = cnn.ConvNet2D(model_cfg.embedding_dim)
        models_list.append([cm_model, 'cm', True, False, False])
    load_models(args, models_list, device, data_parallel, output)
    get_loss = (plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss)
    end = Print('end initializing a model', output)
    # NOTE(review): ''.join here (and below) prints 'elapsed time:0:00:01'
    # without a space, unlike the ' '.join used above -- confirm intended.
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # Configure the trainer: which tasks run and which metrics they report.
    start = Print('start setting trainer configurations', output)
    tasks_list = []
    tasks_list.append(['cls', [], ['acc', 'r', 'rho', 'aupr_cl', 'aupr_fo', 'aupr_sf', 'aupr_fa']])
    if (not flag_lm_model):
        tasks_list.append(['lm', [], ['acc']])
    if flag_cm_model:
        tasks_list.append(['cm', [], ['pr', 're', 'f1']])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)
    trainer_args = {}
    trainer_args['data_parallel'] = data_parallel
    trainer_args['paired'] = True
    if flag_rnn:
        trainer_args['evaluate_cls'] = plus_rnn.evaluate_homology
    else:
        trainer_args['evaluate_cls'] = plus_tfm.evaluate_homology
    trainer_args['evaluate'] = ['cls', homology.evaluate_homology]
    end = Print('end setting trainer configurations', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # Evaluation loop: first the classification pass (no augmentation),
    # then an optional LM pass with augmentation enabled.
    start = Print('start evaluating a model', output)
    Print(trainer.get_headline(test=True), output)
    for (idx_test, dataset_test, iterator_test) in zip(idxs_test, datasets_test, iterators_test):
        dataset_test.set_augment(False)
        trainer.set_exec_flags(['cls', 'lm', 'cm'], [True, False, True])
        for (b, batch) in enumerate(iterator_test):
            batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
            trainer.evaluate(batch, trainer_args)
            if ((b % 10) == 0):
                print('# cls {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
        print((' ' * 150), end='\r', file=sys.stderr)
        if (not flag_lm_model):
            dataset_test.set_augment(True)
            trainer.set_exec_flags(['cls', 'lm', 'cm'], [False, True, False])
            for (b, batch) in enumerate(iterator_test):
                batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
                trainer.evaluate(batch, trainer_args)
                if ((b % 10) == 0):
                    print('# lm {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
            print((' ' * 150), end='\r', file=sys.stderr)
        Print(trainer.get_log(test_idx=idx_test, args=trainer_args), output)
        trainer.reset()
    end = Print('end evaluating a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    output.close() |
def generate_bsm_links(graph, node_procs, parsed_args, bsm_naming_func):
    """Create BSM middle nodes plus the channels linking them to routers.

    For every edge (node1, node2) of *graph* a BSM node is inserted; each
    endpoint gets one quantum channel to the BSM node and classical
    channels in both directions.

    Returns
    -------
    tuple
        (cchannels, qchannels, bsm_nodes) lists of config dictionaries.
    """
    cchannels = []
    qchannels = []
    bsm_nodes = []
    for (i, node_pair) in enumerate(graph.edges):
        (node1, node2) = node_pair
        bsm_name = bsm_naming_func(node1, node2)
        bsm_node = {Topology.NAME: bsm_name, Topology.TYPE: RouterNetTopo.BSM_NODE, Topology.SEED: i, RouterNetTopo.GROUP: node_procs[node1]}
        bsm_nodes.append(bsm_node)
        for node in node_pair:
            # Each half-link spans half the edge length: km * 1000 / 2 == * 500 m.
            qchannels.append({Topology.SRC: node, Topology.DST: bsm_name, Topology.DISTANCE: (parsed_args.qc_length * 500), Topology.ATTENUATION: parsed_args.qc_atten})
        for node in node_pair:
            # cc_delay is given in ms; the simulator expects picoseconds.
            # NOTE(review): the original factor was corrupted to `* .0`
            # (which would zero every delay); restored to 1e9 -- confirm
            # against the original source.
            cchannels.append({Topology.SRC: bsm_name, Topology.DST: node, Topology.DELAY: (parsed_args.cc_delay * 1000000000.0)})
            cchannels.append({Topology.SRC: node, Topology.DST: bsm_name, Topology.DELAY: (parsed_args.cc_delay * 1000000000.0)})
    return (cchannels, qchannels, bsm_nodes)
def get_out_mask(cfg, pred_mask):
    """Post-process a predicted mask according to the configured loss type.

    'L1'  -> per-sample min-max normalization to [0, 1] (expects c == 1)
    'BCE' -> sigmoid (expects c == 1)
    'CE'  -> channel argmax, keeping a singleton channel dim

    Raises NotImplementedError for any other configured loss type.
    """
    loss_type = cfg.MODEL.CDPN.ROT_HEAD.MASK_LOSS_TYPE
    (bs, c, h, w) = pred_mask.shape
    if (loss_type == 'L1'):
        assert (c == 1), c
        flat = pred_mask.view(bs, (- 1))
        hi = torch.max(flat, dim=(- 1))[0].view(bs, 1, 1, 1)
        lo = torch.min(flat, dim=(- 1))[0].view(bs, 1, 1, 1)
        return ((pred_mask - lo) / (hi - lo))
    if (loss_type == 'BCE'):
        assert (c == 1), c
        return torch.sigmoid(pred_mask)
    if (loss_type == 'CE'):
        return torch.argmax(pred_mask, dim=1, keepdim=True)
    raise NotImplementedError(f'unknown mask loss type: {loss_type}')
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for an OpenAI GPT model.

    Can be built either from a JSON config file (str path as first
    argument) or directly from keyword hyperparameters (int vocab size as
    first argument).
    """
    pretrained_config_archive_map = OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self, vocab_size_or_config_json_file=40478, n_positions=512, n_ctx=512, n_embd=768, n_layer=12, n_head=12, afn='gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, predict_special_tokens=True, num_labels=1, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        super(OpenAIGPTConfig, self).__init__(**kwargs)
        # `unicode` only exists on Python 2; this branch keeps py2 compat.
        if (isinstance(vocab_size_or_config_json_file, str) or ((sys.version_info[0] == 2) and isinstance(vocab_size_or_config_json_file, unicode))):
            # Load every key/value pair from the JSON file verbatim.
            with open(vocab_size_or_config_json_file, 'r', encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for (key, value) in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.predict_special_tokens = predict_special_tokens
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_first_dropout = summary_first_dropout
            self.summary_proj_to_labels = summary_proj_to_labels
        else:
            raise ValueError('First argument must be either a vocabulary size (int)or the path to a pretrained model config file (str)')

    # NOTE(review): upstream transformers exposes the following as
    # @property accessors; here they are plain methods -- confirm callers
    # invoke them with parentheses.
    def max_position_embeddings(self):
        return self.n_positions

    def hidden_size(self):
        return self.n_embd

    def num_attention_heads(self):
        return self.n_head

    def num_hidden_layers(self):
        return self.n_layer |
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """Like ``np.where(cond, f(*arrays), f2(*arrays))`` but evaluating each
    function only on the elements its branch selects (useful when f / f2
    would raise or warn on the other branch's inputs).

    Exactly one of *fillvalue* (constant used where cond is False) or *f2*
    (function applied where cond is False) must be given. Works on any
    array-API-compatible namespace via ``array_namespace``.
    """
    xp = array_namespace(cond, *arrays)
    # XOR check: both-None and both-given are equally invalid.
    if ((f2 is fillvalue is None) or ((f2 is not None) and (fillvalue is not None))):
        raise ValueError('Exactly one of `fillvalue` or `f2` must be given.')
    args = xp.broadcast_arrays(cond, *arrays)
    (cond, arrays) = (xp.astype(args[0], bool, copy=False), args[1:])
    # Evaluate f only where cond is True (boolean-mask indexing).
    temp1 = xp.asarray(f(*(arr[cond] for arr in arrays)))
    if (f2 is None):
        fillvalue = xp.asarray(fillvalue)
        # Result dtype must hold both branch results.
        dtype = xp.result_type(temp1.dtype, fillvalue.dtype)
        out = xp.full(cond.shape, fill_value=fillvalue, dtype=dtype)
    else:
        ncond = (~ cond)
        temp2 = xp.asarray(f2(*(arr[ncond] for arr in arrays)))
        dtype = xp.result_type(temp1, temp2)
        out = xp.empty(cond.shape, dtype=dtype)
        out[ncond] = temp2
    out[cond] = temp1
    return out |
class Reshape(nn.Module):
    """Module that reshapes its input to a fixed target shape.

    Integer entries in *shape* are used literally (so -1 works as usual);
    string entries name an input axis whose size is copied, e.g.
    ``Reshape('0', -1)`` keeps the batch dimension and flattens the rest.
    """

    def __init__(self, *shape: Union[int, str]):
        super().__init__()
        self.shape = shape

    def forward(self, x: torch.Tensor):
        resolved = []
        for dim in self.shape:
            if isinstance(dim, str):
                # String entry: copy the size of that axis from the input.
                resolved.append(x.shape[int(dim)])
            else:
                resolved.append(dim)
        return x.reshape(*resolved)
# NOTE(review): the decorator below was truncated to '.mpl_image_compare'
# in the original (a syntax error); restored as the standard pytest-mpl
# marker -- confirm against the original source.
@pytest.mark.mpl_image_compare
def test_plot_results_components_no_cls(datadir):
    """Brazil-band plot with components=True and no_cls=True.

    Only the CLs+b / CLb component curves should be drawn; all
    CLs-related artists (observed/expected lines, bands, test size)
    must be absent from the returned collection.
    """
    data = json.load(datadir.joinpath('tail_probs_hypotest_results.json').open(encoding='utf-8'))
    fig = Figure()
    ax = fig.subplots()
    brazil_band_collection = brazil.plot_results(data['testmus'], data['results'], test_size=0.05, ax=ax, components=True, no_cls=True)
    assert (brazil_band_collection.cls_obs is None)
    assert (brazil_band_collection.cls_exp is None)
    assert (brazil_band_collection.one_sigma_band is None)
    assert (brazil_band_collection.two_sigma_band is None)
    assert (brazil_band_collection.test_size is None)
    assert (brazil_band_collection.clsb is not None)
    assert (brazil_band_collection.clb is not None)
    assert (brazil_band_collection.axes == ax)
    return fig
def get_vocabulary(fobj, is_dict=False):
    """Count word frequencies from an open text-file object.

    Parameters
    ----------
    fobj : iterable of str
        Open file (or any line iterable).
    is_dict : bool
        If True, each line is 'word count'; otherwise lines are split
        into whitespace-separated words counted once per occurrence.

    Returns
    -------
    collections.Counter
        word -> frequency.
    """
    vocab = Counter()
    for (i, line) in enumerate(fobj):
        if is_dict:
            try:
                (word, count) = line.strip('\r\n ').split(' ')
            except ValueError:
                # Narrowed from a bare `except:`: only a malformed line
                # (wrong number of fields) should abort; KeyboardInterrupt
                # etc. now propagate normally.
                print('Failed reading vocabulary file at line {0}: {1}'.format(i, line))
                sys.exit(1)
            vocab[word] += int(count)
        else:
            for word in line.strip('\r\n ').split(' '):
                if word:
                    vocab[word] += 1
    return vocab
# NOTE(review): the decorator below was truncated to '_params(...)' in the
# original (a syntax error); restored as sklearn's @validate_params --
# confirm against the original source.
@validate_params(
    {'labels_true': ['array-like'], 'labels_pred': ['array-like'], 'beta': [Interval(Real, 0, None, closed='left')]},
    prefer_skip_nested_validation=True,
)
def v_measure_score(labels_true, labels_pred, *, beta=1.0):
    """V-measure cluster labeling given a ground truth.

    The harmonic mean of homogeneity and completeness, weighted by *beta*;
    simply the third element returned by
    ``homogeneity_completeness_v_measure``.
    """
    return homogeneity_completeness_v_measure(labels_true, labels_pred, beta=beta)[2]
def get_word_embedding(sp_output_dir=None):
    """Extract the BERT word-embedding matrix from a fine-tuned checkpoint.

    Parameters
    ----------
    sp_output_dir : str
        Directory holding the pretrained Ner model / tokenizer.

    Returns
    -------
    tuple(dict, dict)
        (wordidx2ebd, ebd2wordidx): index -> embedding row (numpy array),
        and embedding row (as tuple) -> index.

    Raises
    ------
    ValueError
        If the embedding weight is not among the model parameters
        (previously this path crashed with an unbound-name NameError).
    """
    model = Ner.from_pretrained(sp_output_dir)
    # Kept for its side effects / parity with the original, though unused here.
    tokenizer = BertTokenizer.from_pretrained(sp_output_dir, do_lower_case=args.do_lower_case)
    bert_embedding = None
    for (name, parameters) in model.named_parameters():
        if (name == 'bert.embeddings.word_embeddings.weight'):
            bert_embedding = parameters.detach().cpu().numpy()
            break  # parameter names are unique; stop scanning once found
    if (bert_embedding is None):
        raise ValueError('bert.embeddings.word_embeddings.weight not found in model parameters')
    wordidx2ebd = {idx: bert_embedding[idx] for idx in range(bert_embedding.shape[0])}
    # Reverse map keyed by the embedding values (as a hashable tuple).
    ebd2wordidx = {tuple(v): k for (k, v) in wordidx2ebd.items()}
    return (wordidx2ebd, ebd2wordidx)
class SyncBNFunc(Function):
    """Autograd Function implementing synchronized (cross-process) BatchNorm.

    Statistics are averaged across all distributed workers via all_reduce
    in both the forward (mean/var) and backward (scale/shift grads) passes.
    CUDA-only.
    """
    # NOTE(review): forward/backward are written old-style (no
    # @staticmethod); modern torch requires static methods on Function --
    # confirm the pinned torch version supports this.
    def forward(ctx, in_data, scale_data, shift_data, running_mean, running_var, eps, momentum, training):
        if in_data.is_cuda:
            ctx.eps = eps
            (N, C, H, W) = in_data.size()
            in_data = in_data.view(N, C, (- 1))
            # Per-sample statistics over the spatial dimension.
            mean_in = in_data.mean((- 1), keepdim=True)
            var_in = in_data.var((- 1), keepdim=True)
            # E[x^2] per sample, used to combine into batch variance.
            temp = (var_in + (mean_in ** 2))
            if training:
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = (temp.mean(0, keepdim=True) - (mean_bn ** 2))
                sum_x = ((mean_bn ** 2) + var_bn)
                # Average mean and second moment across workers, then
                # recompute the global variance.
                dist.all_reduce(mean_bn)
                mean_bn /= dist.get_world_size()
                dist.all_reduce(sum_x)
                sum_x /= dist.get_world_size()
                var_bn = (sum_x - (mean_bn ** 2))
                # NOTE(review): running stats use
                # running = momentum*running + (1-momentum)*new, the inverse
                # of nn.BatchNorm's momentum convention -- confirm intended.
                running_mean.mul_(momentum)
                running_mean.add_(((1 - momentum) * mean_bn.data))
                running_var.mul_(momentum)
                running_var.add_(((1 - momentum) * var_bn.data))
            else:
                # Inference: use the accumulated running statistics.
                mean_bn = torch.autograd.Variable(running_mean)
                var_bn = torch.autograd.Variable(running_var)
            x_hat = ((in_data - mean_bn) / (var_bn + ctx.eps).sqrt())
            x_hat = x_hat.view(N, C, H, W)
            out_data = ((x_hat * scale_data) + shift_data)
            ctx.save_for_backward(in_data.data, scale_data.data, x_hat.data, mean_bn.data, var_bn.data)
        else:
            raise RuntimeError('SyncBNFunc only support CUDA computation!')
        return out_data

    def backward(ctx, grad_outdata):
        if grad_outdata.is_cuda:
            (in_data, scale_data, x_hat, mean_bn, var_bn) = ctx.saved_tensors
            (N, C, H, W) = grad_outdata.size()
            # Gradients w.r.t. the affine parameters, summed over batch and
            # space, then reduced across workers.
            scaleDiff = torch.sum((grad_outdata * x_hat), [0, 2, 3], keepdim=True)
            shiftDiff = torch.sum(grad_outdata, [0, 2, 3], keepdim=True)
            dist.all_reduce(scaleDiff)
            dist.all_reduce(shiftDiff)
            # Standard BN input gradient, normalized by the *global* element
            # count (N*H*W*world_size).
            inDiff = ((scale_data / (var_bn.view(1, C, 1, 1) + ctx.eps).sqrt()) * (grad_outdata - ((1 / (((N * H) * W) * dist.get_world_size())) * ((scaleDiff * x_hat) + shiftDiff))))
        else:
            raise RuntimeError('SyncBNFunc only support CUDA computation!')
        # One gradient slot per forward argument; non-tensor args get None.
        return (inDiff, scaleDiff, shiftDiff, None, None, None, None, None) |
class H_Swish(nn.Module):
    """Hard-swish activation: x * relu6(x + 3) / 6."""

    def forward(self, x):
        return x * F.relu6(x + 3, inplace=True) / 6
class set_layer_config():
    """Context manager temporarily overriding module-level layer flags.

    On construction it saves the current values of the four module globals
    and applies any non-None overrides immediately; on exit it restores
    the saved values. Note the overrides take effect at __init__ time, not
    at __enter__.
    """

    def __init__(self, scriptable: Optional[bool]=None, exportable: Optional[bool]=None, no_jit: Optional[bool]=None, no_activation_jit: Optional[bool]=None):
        global _SCRIPTABLE
        global _EXPORTABLE
        global _NO_JIT
        global _NO_ACTIVATION_JIT
        # Snapshot current state so __exit__ can restore it.
        self.prev = (_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT)
        # Only non-None arguments override the corresponding global.
        if (scriptable is not None):
            _SCRIPTABLE = scriptable
        if (exportable is not None):
            _EXPORTABLE = exportable
        if (no_jit is not None):
            _NO_JIT = no_jit
        if (no_activation_jit is not None):
            _NO_ACTIVATION_JIT = no_activation_jit

    def __enter__(self) -> None:
        pass

    def __exit__(self, *args: Any) -> bool:
        global _SCRIPTABLE
        global _EXPORTABLE
        global _NO_JIT
        global _NO_ACTIVATION_JIT
        (_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT) = self.prev
        # Returning False propagates any exception raised in the body.
        return False |
class InpaintGenerator(BaseNetwork):
    """Transformer-based video inpainting generator.

    Pipeline: CNN encoder -> soft-split into patch tokens -> positional
    embedding -> stacked transformer blocks -> soft-composition back to
    feature maps -> CNN decoder with tanh output in [-1, 1].
    """

    def __init__(self, init_weights=True):
        super(InpaintGenerator, self).__init__()
        channel = 256
        hidden = 512
        stack_num = 8
        num_head = 4
        kernel_size = (7, 7)
        padding = (3, 3)
        stride = (3, 3)
        output_size = (60, 108)
        blocks = []
        dropout = 0.0
        t2t_params = {'kernel_size': kernel_size, 'stride': stride, 'padding': padding, 'output_size': output_size}
        # Number of patch tokens produced by the (unfold-style) soft split.
        n_vecs = 1
        for (i, d) in enumerate(kernel_size):
            n_vecs *= int((((((output_size[i] + (2 * padding[i])) - (d - 1)) - 1) / stride[i]) + 1))
        for _ in range(stack_num):
            blocks.append(TransformerBlock(hidden=hidden, num_head=num_head, dropout=dropout, n_vecs=n_vecs, t2t_params=t2t_params))
        self.transformer = nn.Sequential(*blocks)
        self.ss = SoftSplit((channel // 2), hidden, kernel_size, stride, padding, dropout=dropout)
        self.add_pos_emb = AddPosEmb(n_vecs, hidden)
        self.sc = SoftComp((channel // 2), hidden, output_size, kernel_size, stride, padding)
        self.encoder = Encoder()
        self.decoder = nn.Sequential(deconv((channel // 2), 128, kernel_size=3, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(0.2, inplace=True), deconv(64, 64, kernel_size=3, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1))
        if init_weights:
            self.init_weights()

    def forward(self, masked_frames):
        """Inpaint a batch of masked frame sequences.

        masked_frames: (b, t, c, h, w) tensor; frames are folded into the
        batch dimension for the CNN stages and treated as a token sequence
        by the transformer. Returns (b*t, 3, H, W) frames in [-1, 1].
        """
        (b, t, c, h, w) = masked_frames.size()
        # (The original recorded `time0 = time.time()` here but never used
        # it; the dead assignment has been removed.)
        enc_feat = self.encoder(masked_frames.view((b * t), c, h, w))
        (_, c, h, w) = enc_feat.size()
        trans_feat = self.ss(enc_feat, b)
        trans_feat = self.add_pos_emb(trans_feat)
        trans_feat = self.transformer(trans_feat)
        trans_feat = self.sc(trans_feat, t)
        # Residual connection around the transformer branch.
        enc_feat = (enc_feat + trans_feat)
        output = self.decoder(enc_feat)
        output = torch.tanh(output)
        return output
class TFPegasusForConditionalGeneration():
    """Placeholder used when TensorFlow is not installed.

    Any attempt to instantiate or load this class calls ``requires_tf``,
    which is expected to raise with an informative message.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    # NOTE(review): in upstream transformers dummy objects,
    # `from_pretrained` is a classmethod; here it is an instance method
    # (unreachable without instantiating, which itself raises) -- confirm.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.