code stringlengths 101 5.91M |
|---|
def host_vulners_scan(api, ip):
    """Return the list of known vulnerabilities for *ip* from Shodan.

    Retries indefinitely (with a back-off sleep) while the API reports a
    request-limit error; any other error aborts the lookup.  An extra SNMP
    finding from snmp_checker() is appended when present.

    Args:
        api: Shodan API client exposing ``host(ip)``.
        ip: IP address to look up.

    Returns:
        List of vulnerability identifiers, or None when there are none or
        the lookup failed.
    """
    # Iterative retry loop instead of the original recursive retry, which
    # could exhaust the call stack during a long rate-limit streak.
    while True:
        time.sleep(REQUEST_DELAY_SLEEP_TIME)
        try:
            host_data = api.host(ip)
            break
        except shodan.exception.APIError as rate_limit_err:
            print('{color}Request limit error (vulnerabilities): {error_info}{reset}'.format(error_info=rate_limit_err, color=ERROR_COLOR, reset=RESET_COLOR))
            time.sleep(REQUEST_LIMIT_SLEEP_TIME)
        except Exception as unknown_error:
            print('{color}Error: {error_info}{reset}'.format(error_info=unknown_error, color=ERROR_COLOR, reset=RESET_COLOR))
            return
    snmp_vulner = snmp_checker(host_data, ip)
    host_vulners = host_data.get('vulns')
    if snmp_vulner:
        if (host_vulners is None):
            host_vulners = []
        host_vulners.append(snmp_vulner)
    if (not host_vulners):
        return
    return host_vulners
class ErnieMForMultipleChoice(metaclass=DummyObject):
    """Placeholder stub used when the torch backend is not installed."""
    # Backends whose availability requires_backends checks for.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails with an informative error unless torch is available
        # (behavior delegated to requires_backends).
        requires_backends(self, ['torch'])
class BlipModel(metaclass=DummyObject):
    """Placeholder stub used when the torch backend is not installed."""
    # Backends whose availability requires_backends checks for.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails with an informative error unless torch is available
        # (behavior delegated to requires_backends).
        requires_backends(self, ['torch'])
class XLNetForTokenClassification(metaclass=DummyObject):
    """Placeholder stub used when the torch backend is not installed."""
    # Backends whose availability requires_backends checks for.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails with an informative error unless torch is available
        # (behavior delegated to requires_backends).
        requires_backends(self, ['torch'])
class ValueFunction(nn.Module):
    """State-value head: encodes an observation and maps it to a scalar value."""
    _encoder: Encoder
    _fc: nn.Linear
    def __init__(self, encoder: Encoder, hidden_size: int):
        super().__init__()
        self._encoder = encoder
        # Projects the encoder's hidden vector to a single scalar output.
        self._fc = nn.Linear(hidden_size, 1)
    def forward(self, x: TorchObservation) -> torch.Tensor:
        h = self._encoder(x)
        # cast() is a typing-only no-op; nn.Linear already returns a Tensor.
        return cast(torch.Tensor, self._fc(h))
    def __call__(self, x: TorchObservation) -> torch.Tensor:
        # Overridden only to narrow the return type for static type checkers.
        return cast(torch.Tensor, super().__call__(x))
class BackboneOutput(ModelOutput):
    """Output container for backbone models (ModelOutput-style dataclass)."""
    # Feature maps of the selected backbone stages; None only before being filled.
    feature_maps: Tuple[torch.FloatTensor] = None
    # Per-stage hidden states — presumably populated only when requested; confirm with caller.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Attention weights — presumably populated only when the backbone returns them.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class DictIterationNextNode(Node):
    """Compiler node for one step of an optimized dict iteration loop.

    Emits a call to the __Pyx_dict_iter_next utility; the requested key /
    value / (key, value)-tuple targets receive the produced objects after
    coercion to their declared types.
    """
    child_attrs = ['dict_obj', 'expected_size', 'pos_index_var', 'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var', 'key_target', 'value_target', 'tuple_target', 'is_dict_flag']
    # Temp refs and their coerced counterparts; only the targets actually
    # requested by the loop get populated in analyse_expressions().
    coerced_key_var = key_ref = None
    coerced_value_var = value_ref = None
    coerced_tuple_var = tuple_ref = None
    def __init__(self, dict_obj, expected_size, pos_index_var, key_target, value_target, tuple_target, is_dict_flag):
        Node.__init__(self, dict_obj.pos, dict_obj=dict_obj, expected_size=expected_size, pos_index_var=pos_index_var, key_target=key_target, value_target=value_target, tuple_target=tuple_target, is_dict_flag=is_dict_flag, is_temp=True, type=PyrexTypes.c_bint_type)
    def analyse_expressions(self, env):
        # For each requested target: analyse it, allocate a py_object temp to
        # receive the raw object, and build a coercion of that temp to the
        # target's declared type.
        from . import ExprNodes
        self.dict_obj = self.dict_obj.analyse_types(env)
        self.expected_size = self.expected_size.analyse_types(env)
        if self.pos_index_var:
            self.pos_index_var = self.pos_index_var.analyse_types(env)
        if self.key_target:
            self.key_target = self.key_target.analyse_target_types(env)
            self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type)
            self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env)
        if self.value_target:
            self.value_target = self.value_target.analyse_target_types(env)
            self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
            self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
        if self.tuple_target:
            self.tuple_target = self.tuple_target.analyse_target_types(env)
            self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type)
            self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env)
        self.is_dict_flag = self.is_dict_flag.analyse_types(env)
        return self
    def generate_function_definitions(self, env, code):
        self.dict_obj.generate_function_definitions(env, code)
    def generate_execution_code(self, code):
        # Emits: result = __Pyx_dict_iter_next(...);
        # result == 0 -> iteration finished (break), result == -1 -> error.
        code.globalstate.use_utility_code(UtilityCode.load_cached('dict_iter', 'Optimize.c'))
        self.dict_obj.generate_evaluation_code(code)
        assignments = []
        temp_addresses = []
        # Unrequested targets pass NULL so the utility function skips them.
        for (var, result, target) in [(self.key_ref, self.coerced_key_var, self.key_target), (self.value_ref, self.coerced_value_var, self.value_target), (self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]:
            if (target is None):
                addr = 'NULL'
            else:
                assignments.append((var, result, target))
                var.allocate(code)
                addr = ('&%s' % var.result())
            temp_addresses.append(addr)
        result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
        code.putln(('%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);' % (result_temp, self.dict_obj.py_result(), self.expected_size.result(), self.pos_index_var.result(), temp_addresses[0], temp_addresses[1], temp_addresses[2], self.is_dict_flag.result())))
        code.putln(('if (unlikely(%s == 0)) break;' % result_temp))
        code.putln(code.error_goto_if(('%s == -1' % result_temp), self.pos))
        code.funcstate.release_temp(result_temp)
        # Take ownership of the borrowed references before evaluating coercions.
        for (var, result, target) in assignments:
            code.put_gotref(var.result())
        for (var, result, target) in assignments:
            result.generate_evaluation_code(code)
        for (var, result, target) in assignments:
            target.generate_assignment_code(result, code)
            var.release(code)
class WILDTRACK(ReidBaseDataModule):
    """WILDTRACK person re-identification data module (ReID-format layout)."""
    dataset_dir = 'ReID_format'
    def __init__(self, cfg, **kwargs):
        super().__init__(cfg, **kwargs)
        self.dataset_dir = osp.join(cfg.DATASETS.ROOT_DIR, self.dataset_dir)
        # NOTE(review): train, query and gallery all point at
        # 'bounding_box_train' — confirm this is intentional for WILDTRACK.
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_train')
    def setup(self):
        """Build the train/val datasets and record dataset statistics."""
        self._check_before_run()
        transforms_base = ReidTransforms(self.cfg)
        (train, train_dict) = self._process_dir(self.train_dir, relabel=True)
        self.train_dict = train_dict
        self.train_list = train
        self.train = BaseDatasetLabelledPerPid(train_dict, transforms_base.build_transforms(is_train=True), self.num_instances, self.cfg.DATALOADER.USE_RESAMPLING)
        (query, query_dict) = self._process_dir(self.query_dir, relabel=False)
        (gallery, gallery_dict) = self._process_dir(self.gallery_dir, relabel=False)
        self.query_list = query
        self.gallery_list = gallery
        # Validation set is query followed by gallery; num_query marks the split.
        self.val = BaseDatasetLabelled((query + gallery), transforms_base.build_transforms(is_train=False))
        self._print_dataset_statistics(train, query, gallery)
        (num_query_pids, num_query_imgs, num_query_cams) = self._get_imagedata_info(query)
        (num_train_pids, num_train_imgs, num_train_cams) = self._get_imagedata_info(train)
        self.num_query = len(query)
        self.num_classes = num_train_pids
    def _process_dir(self, dir_path, relabel=False):
        """Scan *dir_path* for '<pid>_c<camid>*.jpg' images.

        Returns (dataset, dataset_dict): a flat list of
        (img_path, pid, camid, idx) tuples and the same tuples grouped by pid.
        pid == -1 entries are skipped; camids are shifted to be 0-based.
        With relabel=True, raw pids are remapped to contiguous labels.
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue
            pid_container.add(pid)
        # Contiguous training labels for the surviving person ids.
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        dataset_dict = defaultdict(list)
        dataset = []
        for (idx, img_path) in enumerate(img_paths):
            (pid, camid) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue
            camid -= 1
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, pid, camid, idx))
            dataset_dict[pid].append((img_path, pid, camid, idx))
        return (dataset, dataset_dict)
class LipschitzCube(nn.Module):
    """Pointwise 1-Lipschitz cubic: x**3 / 3 on (-1, 1), unit-slope linear tails outside."""

    def forward(self, x):
        # Indicator masks for the three regions, cast to x's dtype/device.
        above = (x >= 1).to(x)
        below = (x <= (- 1)).to(x)
        inside = ((x > (- 1)) * (x < 1)).to(x)
        # The shifted tails keep the function continuous at +/-1.
        upper_tail = above * (x - (2 / 3))
        lower_tail = below * (x + (2 / 3))
        cubic_core = (inside * (x ** 3)) / 3
        return (upper_tail + lower_tail) + cubic_core
class normsys_builder():
    """Accumulate per-sample 'normsys' modifier data (pyhf-style builder).

    For every (modifier-key, sample) pair, collects per-bin lo/hi scale
    factors, a validity mask, and unit nominal data, concatenated across
    channels via append().
    """
    is_shared = True
    def __init__(self, config):
        self.builder_data = {}
        self.config = config
        self.required_parsets = {}
    def collect(self, thismod, nom):
        """Return lo/hi/mask/nom_data lists with one entry per bin of *nom*.

        When *thismod* is falsy the modifier is masked off and the scale
        factors default to 1.0 (identity).
        """
        nbins = len(nom)
        maskval = bool(thismod)  # idiomatic replacement of `True if thismod else False`
        lo_factor = (thismod['data']['lo'] if thismod else 1.0)
        hi_factor = (thismod['data']['hi'] if thismod else 1.0)
        return {
            'lo': ([lo_factor] * nbins),
            'hi': ([hi_factor] * nbins),
            'mask': ([maskval] * nbins),
            'nom_data': ([1.0] * nbins),
        }
    def append(self, key, channel, sample, thismod, defined_samp):
        """Append this channel's bins for (key, sample); zero-pad undefined samples."""
        self.builder_data.setdefault(key, {}).setdefault(sample, {}).setdefault('data', {'hi': [], 'lo': [], 'nom_data': [], 'mask': []})
        nom = (defined_samp['data'] if defined_samp else ([0.0] * self.config.channel_nbins[channel]))
        moddata = self.collect(thismod, nom)
        self.builder_data[key][sample]['data']['nom_data'] += moddata['nom_data']
        self.builder_data[key][sample]['data']['lo'] += moddata['lo']
        self.builder_data[key][sample]['data']['hi'] += moddata['hi']
        self.builder_data[key][sample]['data']['mask'] += moddata['mask']
        if thismod:
            # setdefault: only the first occurrence of a modifier name
            # registers its parameter set — presumably intentional; confirm.
            self.required_parsets.setdefault(thismod['name'], [required_parset(defined_samp['data'], thismod['data'])])
    def finalize(self):
        """Return the accumulated builder data."""
        return self.builder_data
def main():
    """Merge the raw 2008-2010 node files into one edge-feature CSV."""
    source_dir = 'raw/node_2008_2010/'
    out_csv = 'redditcomments_edgefeat_2008_2010.csv'
    file_names = find_filenames(source_dir)
    # Only the first file writes the CSV header; later files append rows.
    for file_index, file_name in enumerate(tqdm(file_names)):
        read_nodeattr((source_dir + file_name), out_csv, write_header=(file_index == 0))
def batchnorm_forward_folding_node_matchers() -> 'Tuple[BaseNode, BaseNode]':
    """Return the pair of node matchers for batch-norm forward folding.

    The first matcher selects the node whose parameters are folded forward
    (a BatchNormalization, or a DepthwiseConv2D — presumably 1x1; the matcher
    itself only checks the op type).  The second selects the convolution the
    folding is applied to.  (Annotation fixed: the original `[BaseNode,
    BaseNode]` list literal was not a valid return type.)
    """
    # Node preceding the convolution: BN or depthwise conv.
    bn_or_dw1x1_node = (NodeOperationMatcher(BatchNormalization) | NodeOperationMatcher(DepthwiseConv2D))
    # Target conv: depthwise, standard, or transposed convolution.
    conv_node = ((NodeOperationMatcher(DepthwiseConv2D) | NodeOperationMatcher(Conv2D)) | NodeOperationMatcher(Conv2DTranspose))
    return (bn_or_dw1x1_node, conv_node)
def load_progress(progress_csv_path):
print(('Reading %s' % progress_csv_path))
entries = dict()
if (progress_csv_path.split('.')[(- 1)] == 'csv'):
delimiter = ','
else:
delimiter = '\t'
with open(progress_csv_path, 'r') as csvfile:
reader = csv.DictReader(csvfile, delimiter=delimiter)
for row in reader:
for (k, v) in row.items():
if (k not in entries):
entries[k] = []
try:
entries[k].append(float(v))
except:
entries[k].append(0.0)
entries = dict([(k, np.array(v)) for (k, v) in entries.items()])
return entries |
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
type_b = type(value_b)
type_a = type(value_a)
if (type_a is type_b):
return value_a
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, str):
value_a = str(value_a)
elif (isinstance(value_a, tuple) and isinstance(value_b, list)):
value_a = list(value_a)
elif (isinstance(value_a, list) and isinstance(value_b, tuple)):
value_a = tuple(value_a)
else:
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, value_b, value_a, full_key))
return value_a |
def compute_f1(list_ref, list_hyp):
    """Compute multiset F1 between a reference list and a hypothesis list.

    True positives are the Counter intersection of the two lists; an empty
    hypothesis (or reference) yields precision (or recall) of 1.0.
    """
    ref_counts = collections.Counter(list_ref)
    hyp_counts = collections.Counter(list_hyp)
    num_true = sum(ref_counts.values())
    num_positive = sum(hyp_counts.values())
    num_true_positive = sum((ref_counts & hyp_counts).values())
    precision = (float(num_true_positive) / num_positive) if num_positive else 1.0
    recall = (float(num_true_positive) / num_true) if num_true else 1.0
    if (precision + recall) > 0.0:
        f1 = (2.0 * precision * recall) / (precision + recall)
    else:
        f1 = 0.0
    return F1Scores(f1=f1, precision=precision, recall=recall)
def _collect_and_assign_act_threshold(graph: Graph, representative_data_gen: Callable, core_config: CoreConfig, fw_info: FrameworkInfo, fw_impl: FrameworkImplementation):
    """Collect activation statistics over representative data and set final
    activation-quantization parameters on every enabled node (in place).

    Args:
        graph: model graph whose nodes are updated in place.
        representative_data_gen: callable yielding calibration batches.
        core_config: configuration holding the quantization config.
        fw_info: framework-specific information object.
        fw_impl: framework-specific implementation hooks.
    """
    # Attach statistics collectors to nodes per the quantization config.
    analyzer_graph(fw_impl.attach_sc_to_node, graph, fw_info, core_config.quantization_config)
    mi = ModelCollector(graph, fw_impl, fw_info)
    # Run inference over the calibration data to populate the collectors.
    for _data in tqdm(representative_data_gen()):
        mi.infer(_data)
    for n in list(graph.nodes):
        if n.is_activation_quantization_enabled():
            activation_params = get_activations_qparams(activation_quant_cfg=n.final_activation_quantization_cfg, nodes_prior_info=n.prior_info, out_stats_container=graph.get_out_stats_collector(n))
            n.final_activation_quantization_cfg.set_activation_quantization_param(activation_params)
('grammar', 'wikisql')
class WikiSqlLanguage():
    """Grammar adapter converting WikiSQL queries to/from an ASDL-backed AST."""
    root_type = 'select'
    def __init__(self):
        # 'column' fields are plain ints (column indices), checked structurally.
        custom_primitive_type_checkers = {'column': (lambda x: isinstance(x, int))}
        self.pointers = {'column'}
        self.ast_wrapper = ast_util.ASTWrapper(asdl.parse(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'WikiSQL.asdl')), custom_primitive_type_checkers=custom_primitive_type_checkers)
    def parse(self, code, section):
        # *section* is accepted for interface compatibility but unused here.
        return self.parse_select(code)
    def unparse(self, tree, item):
        # *item* is accepted for interface compatibility but unused here.
        return self.unparse_select(tree)
    def tokenize_field_value(cls, field_value):
        # NOTE(review): first parameter is named `cls` but no @classmethod
        # decorator is visible — confirm the intended binding.
        assert isinstance(field_value, str)
        ann = corenlp.annotate(field_value, annotators=['tokenize'])
        result = []
        # Preserve inter-token whitespace characters, then the lowercased token.
        for token in ann.sentencelessToken:
            result += list(token.before)
            result.append(token.originalText.lower())
        return result
    def parse_select(self, select):
        """Convert a WikiSQL select dict into the ASDL 'select' node."""
        return filter_nones({'_type': 'select', 'agg': {'_type': self.AGG_TYPES_F[select['agg']]}, 'col': select['sel'], 'conds': [self.parse_cond(c) for c in select['conds']]})
    def parse_cond(self, cond):
        """Convert one (column, operator, value) condition triple to an AST node."""
        (column_index, operator_index, value) = cond
        return {'_type': 'cond', 'op': {'_type': self.CMP_TYPES_F[operator_index]}, 'col': column_index, 'value': str(value).lower()}
    def unparse_select(self, select):
        """Inverse of parse_select: AST node back to the WikiSQL dict form."""
        return {'agg': self.AGG_TYPES_B[select['agg']['_type']], 'sel': select['col'], 'conds': [self.unparse_cond(c) for c in select.get('conds', [])]}
    def unparse_cond(self, cond):
        """Inverse of parse_cond: AST node back to the [col, op, value] triple."""
        return [cond['col'], self.CMP_TYPES_B[cond['op']['_type']], cond['value']]
    # Bidirectional maps between WikiSQL integer codes and type names.
    (AGG_TYPES_F, AGG_TYPES_B) = bimap(range(6), ('NoAgg', 'Max', 'Min', 'Count', 'Sum', 'Avg'))
    (CMP_TYPES_F, CMP_TYPES_B) = bimap(range(4), ('Equal', 'GreaterThan', 'LessThan', 'Other'))
def format(_pattern, _quote_all=False, **kwargs):
    """Format *_pattern*, quoting the substituted elements.

    Args:
        _pattern: format string with {name} placeholders.
        _quote_all: if True, quote every substituted element, not only the
            ones that need quoting.
        **kwargs: values substituted into the pattern.

    Returns:
        The formatted string.

    Raises:
        NameError: if the pattern references a name missing from kwargs.
    """
    fmt = SequenceFormatter(separator=' ')
    fmt.element_formatter = AlwaysQuotedFormatter() if _quote_all else QuotedFormatter()
    try:
        return fmt.format(_pattern, **kwargs)
    except KeyError as ex:
        # Chain the KeyError explicitly so the missing key stays visible
        # in the traceback (the original raise dropped the `from ex`).
        raise NameError(f'The name {ex} is unknown in this context. Please make sure that you defined that variable. Also note that braces not used for variable access have to be escaped by repeating them ') from ex
def extract_answer(completion):
    """Extract the final numeric answer from a GSM8K-style completion.

    The answer is expected after a '#### ' marker (e.g. '#### 1,234');
    thousands separators are stripped from the returned string.

    Args:
        completion: model output text.

    Returns:
        The answer as a string with commas removed.

    Raises:
        AssertionError: if no '#### ' marker is found.
    """
    ANS_RE = re.compile('#### (\\-?[0-9\\.\\,]+)')
    match = ANS_RE.search(completion)
    if match:
        match_str = match.group(1).strip()
        return match_str.replace(',', '')
    # Explicit raise instead of `assert False`, which is silently stripped
    # under `python -O`; the exception type callers see is unchanged.
    raise AssertionError(f'no #### answer marker found in completion: {completion!r}')
def eval_align_batchN(model, loader, P=256):
    """Run model.test over every batch of *loader* in eval mode.

    Returns a pair (predictions, losses): predictions flattened into rows of
    length P, and the scalar loss of each batch.
    """
    predictions = []
    losses = []
    for batch in loader:
        (batch_loss, batch_out) = model.test(batch, if_eval=True)
        losses.append(batch_loss.item())
        predictions.extend(batch_out.reshape(-1, P).tolist())
    return (predictions, losses)
def less_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward rule for an elementwise `less` comparison.

    Comparison ops are non-differentiable, so every slot gets None;
    the list length is len(grad_inputs) + len(inputs).
    """
    total_slots = len(grad_inputs) + len(inputs)
    return [None] * total_slots
class SpecRCSteps(RCSteps):
    """RCSteps variant that renders range-check obligations as Lean spec strings."""
    def branch(self):
        # Fork an independent copy that starts from this instance's state.
        rc = SpecRCSteps(self.rc_builtin)
        rc.copy_from(self)
        return rc
    def get_assert_rc_spec(self, lhs: Expression, rhs: Expression, desc_ctx: LeanDescContext) -> List[str]:
        """Lean `is_range_checked` propositions for an assert-style rc check."""
        return [f'is_range_checked (rc_bound F) ({to_lean_description(expr=expr, context=desc_ctx)})' for (expr, _, _) in self.get_assert_rc_check(lhs, rhs)]
    def get_var_rc_spec(self, name: str, expr: Expression, desc_ctx: LeanDescContext) -> List[str]:
        """Lean propositions for the rc checks of *expr*; a check that equals
        *expr* itself is referred to by *name* instead of being re-rendered."""
        return [f'is_range_checked (rc_bound F) {expr_desc}' for expr_desc in [(name if (rc_expr == expr) else f'({to_lean_description(expr=rc_expr, context=desc_ctx)})') for (rc_expr, _, _) in self.get_rc_checks(expr)]]
def to_id(s):
    """Map a token to its id: digits '0'-'9' -> 1-10, '+' -> 11, '*' -> 12."""
    operator_ids = {'+': 11, '*': 12}
    if s in operator_ids:
        return operator_ids[s]
    # Digits are offset by one (0 is reserved); non-digits raise ValueError,
    # matching the original behavior.
    return int(s) + 1
.skipif((not has_pytorch()), reason='Pytorch not installed.')
_utils.test()
def test_device():
    """Taichi fields must export to torch tensors on the requested device."""
    n = 12
    X = ti.Matrix.field(3, 2, ti.f32, shape=(n, n, n))
    assert (X.to_torch(device='cpu').device == torch.device('cpu'))
    # Only exercise the CUDA path when a GPU is actually available.
    if torch.cuda.is_available():
        assert (X.to_torch(device='cuda:0').device == torch.device('cuda:0'))
class UnitCircleGroup(AbstractArgumentGroup):
    """Group of points on the unit circle, each represented by its exponent
    taken modulo ZZ (elements are UnitCirclePoint instances)."""
    Element = UnitCirclePoint
    def _repr_(self):
        return 'Unit Circle Group with Exponents in {} modulo ZZ'.format(self.base())
    def _repr_short_(self):
        from sage.rings.asymptotic.misc import parent_to_repr_short
        s = parent_to_repr_short(self.base())
        # Parenthesize multi-word base descriptions for readability.
        if (' ' in s):
            s = '({})'.format(s)
        return 'UU_{}'.format(s)
    def _element_constructor_(self, data, exponent=None, **kwds):
        """Build an element from *data* (an existing element, +-1, an element
        of a sign/unit-circle group, or a cyclotomic root of unity) or from
        an explicitly given *exponent* — but not both."""
        from sage.groups.generic import discrete_log
        import sage.rings.abc
        from sage.rings.asymptotic.misc import combine_exceptions
        from sage.rings.rational_field import QQ
        if (exponent is None):
            if (isinstance(data, int) and (data == 0)):
                raise ValueError('no input specified')
            elif isinstance(data, self.element_class):
                if (data.parent() == self):
                    return data
                exponent = data.exponent
            elif ((data == 1) or (data == '1')):
                exponent = 0
            elif ((data == (- 1)) or (data == '-1')):
                # -1 corresponds to exponent 1/2 (a half turn).
                exponent = QQ((1, 2))
            else:
                try:
                    P = data.parent()
                except AttributeError:
                    raise TypeError('{} is not in {}'.format(data, self))
                if isinstance(P, SignGroup):
                    if data.is_one():
                        exponent = 0
                    elif data.is_minus_one():
                        exponent = QQ((1, 2))
                elif isinstance(P, UnitCircleGroup):
                    exponent = data.exponent
                elif isinstance(P, sage.rings.abc.NumberField_cyclotomic):
                    # Root of unity: recover the exponent via a discrete log
                    # with respect to the field's primitive root zeta.
                    zeta = P.gen()
                    n = zeta.multiplicative_order()
                    try:
                        exponent = (QQ(discrete_log(data, zeta)) / QQ(n))
                    except ValueError as e:
                        raise combine_exceptions(ValueError('{} is not in {}'.format(data, self)), e)
            # Nothing above managed to determine an exponent for *data*.
            if (exponent is None):
                raise ValueError('{} is not in {}'.format(data, self))
        elif ((not isinstance(data, int)) or (data != 0)):
            # Both data and exponent given: reject the ambiguous call.
            raise ValueError('input is ambigous: {} as well as exponent={} specified'.format(data, exponent))
        return self.element_class(self, exponent, **kwds)
    def _create_element_in_extension_(self, exponent):
        # Stay in self when the exponent already lives in our base ring;
        # otherwise construct the matching argument group for its parent.
        if (exponent.parent() is self.base()):
            parent = self
        else:
            parent = ArgumentGroup(exponents=exponent.parent())
        return parent(exponent=exponent)
    def _coerce_map_from_(self, R):
        # Unit-circle groups coerce along coercions of their exponent bases;
        # the sign group always embeds.  Implicit None means "no coercion".
        if isinstance(R, UnitCircleGroup):
            return self.base().has_coerce_map_from(R.base())
        if isinstance(R, SignGroup):
            return True
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """Register *multi_representer* for *data_type* (and its subclasses) on *Dumper*."""
    Dumper.add_multi_representer(data_type, multi_representer)
class DistributedDataParallelModel(DistributedDataParallel):
    """DDP variant whose gather step is a no-op, keeping per-device outputs."""
    def gather(self, outputs, output_device):
        # Intentionally skip gathering to output_device — presumably the
        # caller consumes the per-replica outputs directly; confirm.
        return outputs
.parametrize('packing_boundary,ext_type,keep_prompt_only_sequences,jsonl,gold_token_ids,gold_ttids', [(BoundaryType.JSONL, FileExtension.JSONL, True, {'prompt': '', 'completion': ''}, [], []), (BoundaryType.JSONL, FileExtension.JSONL, True, {'prompt': 'hi', 'completion': ''}, [[1, 0]], [[PROMPT, SEP]]), (BoundaryType.JSONL, FileExtension.JSONL, True, {'prompt': '', 'completion': 'bye'}, [[2, 0]], [[COMP, SEP]]), (BoundaryType.JSONL, FileExtension.JSONL, True, {'prompt': 'hi test', 'completion': 'bye test'}, [[1, 3, 2, 3, 0]], [[PROMPT, PROMPT, COMP, COMP, SEP]]), (BoundaryType.JSONL, FileExtension.JSONL, True, [{'prompt': 'hi', 'completion': 'bye'}], [[1, 2, 0]], [[PROMPT, COMP, SEP]]), (BoundaryType.JSONL, FileExtension.JSONL, True, [{'prompt': 'hi', 'completion': 'bye'}, {'prompt': 'test', 'completion': 'test'}], [[1, 2, 0, 3, 3, 0]], [[PROMPT, COMP, COMP, PROMPT, COMP, SEP]]), (BoundaryType.PROMPT_COMPLETION_PAIR, FileExtension.JSONL, True, {'prompt': '', 'completion': ''}, [], []), (BoundaryType.PROMPT_COMPLETION_PAIR, FileExtension.JSONL, True, {'prompt': 'hi', 'completion': ''}, [[1, 0]], [[PROMPT, SEP]]), (BoundaryType.PROMPT_COMPLETION_PAIR, FileExtension.JSONL, True, {'prompt': '', 'completion': 'bye'}, [[2, 0]], [[COMP, SEP]]), (BoundaryType.PROMPT_COMPLETION_PAIR, FileExtension.JSONL, True, {'prompt': 'hi test', 'completion': 'bye test'}, [[1, 3, 2, 3, 0]], [[PROMPT, PROMPT, COMP, COMP, SEP]]), (BoundaryType.PROMPT_COMPLETION_PAIR, FileExtension.JSONL, True, [{'prompt': 'hi', 'completion': 'bye'}], [[1, 2, 0]], [[PROMPT, COMP, SEP]]), (BoundaryType.PROMPT_COMPLETION_PAIR, FileExtension.JSONL, True, [{'prompt': 'hi', 'completion': 'bye'}, {'prompt': 'test', 'completion': 'test'}], [[1, 2, 0], [3, 3, 0]], [[PROMPT, COMP, COMP], [PROMPT, COMP, SEP]])])
def test_process_jsonl(article_tokenizer_for_prompt_sequences: ArticleTokenizer, jsonl: Union[(dict, List)], gold_token_ids: List[List[int]], gold_ttids: List[List[int]]):
    """Tokenized articles must match the gold token ids and token-type ids.

    NOTE(review): zip() silently truncates if process_jsonl returns a
    different number of articles than expected — the parametrized cases
    rely on matching lengths.
    """
    tokenized_articles = article_tokenizer_for_prompt_sequences.process_jsonl(jsonl)
    for (tokenized_article, gold_token, gold_ttid) in zip(tokenized_articles, gold_token_ids, gold_ttids):
        assert (tokenized_article.dump_token_ids() == gold_token)
        assert (tokenized_article.dump_token_type_ids() == gold_ttid)
def exploded_plot(polyhedra, *, center=None, explosion_factor=1, sticky_vertices=False, sticky_center=True, point=None, **kwds):
    """Plot *polyhedra* translated apart from each other ("exploded view").

    Each polyhedron is shifted away from *center* along the direction to its
    own center (plus the sum of its ray vectors), scaled by
    *explosion_factor*.  Translated copies of a shared vertex can be joined
    by lines (sticky_vertices; sticky_center for copies of *center* itself),
    and vertices can be drawn as points.  color='rainbow' gives each cell
    (and each vertex) its own color.
    """
    from sage.plot.colors import rainbow
    from sage.plot.graphics import Graphics
    from sage.plot.line import line
    from sage.plot.point import point as plot_point
    import itertools
    polyhedra = list(polyhedra)
    g = Graphics()
    if (not polyhedra):
        return g
    dim = polyhedra[0].ambient_dimension()
    if (center is None):
        from sage.rings.rational_field import QQ
        center = vector(QQ, dim)
    else:
        center = vector(center)
    # Per-polyhedron translation vectors for the explosion.
    translations = [(explosion_factor * ((p.center() + sum((r.vector() for r in p.rays()))) - center)) for p in polyhedra]
    # Map each original vertex to all of its translated copies.
    vertex_translations_dict = {}
    for (P, t) in zip(polyhedra, translations):
        for v in P.vertices():
            v = v.vector()
            v.set_immutable()
            vertex_translations_dict[v] = vertex_translations_dict.get(v, [])
            vertex_translations_dict[v].append((v + t))
    color = kwds.get('color')
    if (color == 'rainbow'):
        cell_colors_dict = dict(zip(polyhedra, rainbow(len(polyhedra))))
    for (p, t) in zip(polyhedra, translations):
        options = copy(kwds)
        if (color == 'rainbow'):
            options['color'] = cell_colors_dict[p]
        # Points are drawn separately below, so suppress them here.
        g += (p + t).plot(point=False, **options)
    if (sticky_vertices or sticky_center):
        # `True` means "use the default gray style"; a dict passes through.
        if (sticky_vertices is True):
            sticky_vertices = dict(color='gray')
        if (sticky_center is True):
            sticky_center = dict(color='gray')
        for (vertex, vertex_translations) in vertex_translations_dict.items():
            if (vertex == center):
                if sticky_center:
                    for vt in vertex_translations:
                        g += line((center, vt), **sticky_center)
            elif sticky_vertices:
                for (vt1, vt2) in itertools.combinations(vertex_translations, 2):
                    g += line((vt1, vt2), **sticky_vertices)
    if (point is None):
        point = dict(size=10)
    if (point is not False):
        if (color == 'rainbow'):
            vertex_colors_dict = dict(zip(vertex_translations_dict.keys(), rainbow(len(vertex_translations_dict.keys()))))
        for (vertex, vertex_translations) in vertex_translations_dict.items():
            options = copy(point)
            if (color == 'rainbow'):
                options['color'] = vertex_colors_dict[vertex]
            g += plot_point(vertex_translations, **options)
    return g
def load_pkl_data(filename):
with open(filename, 'rb') as handle:
data_dict = pickle.load(handle)
return data_dict |
class SNResNetProjectionDiscriminator(chainer.Chain):
    """SN-GAN projection discriminator: six residual blocks, global sum
    pooling, a linear head, and an optional class-projection term."""
    def __init__(self, ch=64, n_classes=0, activation=F.relu):
        super(SNResNetProjectionDiscriminator, self).__init__()
        self.activation = activation
        initializer = chainer.initializers.GlorotUniform()
        with self.init_scope():
            # Channel widths grow 1x -> 2x -> 4x -> 4x -> 8x -> 8x of `ch`;
            # the first five blocks downsample, the last keeps resolution.
            self.block1 = OptimizedBlock(3, ch)
            self.block2 = Block(ch, (ch * 2), activation=activation, downsample=True)
            self.block3 = Block((ch * 2), (ch * 4), activation=activation, downsample=True)
            self.block4 = Block((ch * 4), (ch * 4), activation=activation, downsample=True)
            self.block5 = Block((ch * 4), (ch * 8), activation=activation, downsample=True)
            self.block6 = Block((ch * 8), (ch * 8), activation=activation, downsample=False)
            self.l7 = SNLinear((ch * 8), 1, initialW=initializer)
            # Class embedding for the projection term, only when conditional.
            if (n_classes > 0):
                self.l_y = SNEmbedID(n_classes, (ch * 8), initialW=initializer)
    def __call__(self, x, y=None, get_feature=False, layer=6):
        """Discriminator score for *x*.

        With get_feature=True, return the flattened features entering block
        index *layer* instead (layer == 6 returns the pooled features).
        When labels *y* are given, add the projection term <embed(y), h>.
        """
        h = x
        blocks = [self.block1, self.block2, self.block3, self.block4, self.block5, self.block6]
        for l in range(len(blocks)):
            if (get_feature and (l == layer)):
                return F.flatten(h)
            h = blocks[l](h)
        h = self.activation(h)
        # Global sum pooling over the spatial dimensions.
        h = F.sum(h, axis=(2, 3))
        if (get_feature and (layer == 6)):
            return F.flatten(h)
        output = self.l7(h)
        if (y is not None):
            w_y = self.l_y(y)
            output += F.sum((w_y * h), axis=1, keepdims=True)
        return output
class TestGaussianMLPPolicies():
    """Tests for GaussianMLPPolicy: with all-ones weights and no hidden
    nonlinearity, the mean is obs_dim * prod(hidden_sizes) and the variance
    is init_std**2, which the tests check exactly."""
    .parametrize('hidden_sizes', [(1,), (2,), (3,), (1, 4), (3, 5)])
    def test_get_action(self, hidden_sizes):
        # Single torch observation -> single action of the right shape/stats.
        env_spec = GarageEnv(DummyBoxEnv())
        obs_dim = env_spec.observation_space.flat_dim
        act_dim = env_spec.action_space.flat_dim
        obs = torch.ones(obs_dim, dtype=torch.float32)
        init_std = 2.0
        policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
        dist = policy(obs)[0]
        expected_mean = torch.full((act_dim,), (obs_dim * torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float)
        expected_variance = (init_std ** 2)
        (action, prob) = policy.get_action(obs)
        assert np.array_equal(prob['mean'], expected_mean.numpy())
        assert dist.variance.equal(torch.full((act_dim,), expected_variance, dtype=torch.float))
        assert (action.shape == (act_dim,))
    .parametrize('hidden_sizes', [(1,), (2,), (3,), (1, 4), (3, 5)])
    def test_get_action_np(self, hidden_sizes):
        # Same as test_get_action but feeding a numpy observation.
        env_spec = GarageEnv(DummyBoxEnv())
        obs_dim = env_spec.observation_space.flat_dim
        act_dim = env_spec.action_space.flat_dim
        obs = np.ones(obs_dim, dtype=np.float32)
        init_std = 2.0
        policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
        dist = policy(torch.from_numpy(obs))[0]
        expected_mean = torch.full((act_dim,), (obs_dim * torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float)
        expected_variance = (init_std ** 2)
        (action, prob) = policy.get_action(obs)
        assert np.array_equal(prob['mean'], expected_mean.numpy())
        assert dist.variance.equal(torch.full((act_dim,), expected_variance, dtype=torch.float))
        assert (action.shape == (act_dim,))
    .parametrize('batch_size, hidden_sizes', [(1, (1,)), (5, (3,)), (8, (4,)), (15, (1, 2)), (30, (3, 4, 10))])
    def test_get_actions(self, batch_size, hidden_sizes):
        # Batched torch observations -> batched actions.
        env_spec = GarageEnv(DummyBoxEnv())
        obs_dim = env_spec.observation_space.flat_dim
        act_dim = env_spec.action_space.flat_dim
        obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)
        init_std = 2.0
        policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
        dist = policy(obs)[0]
        expected_mean = torch.full([batch_size, act_dim], (obs_dim * torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float)
        expected_variance = (init_std ** 2)
        (action, prob) = policy.get_actions(obs)
        assert np.array_equal(prob['mean'], expected_mean.numpy())
        assert dist.variance.equal(torch.full((batch_size, act_dim), expected_variance, dtype=torch.float))
        assert (action.shape == (batch_size, act_dim))
    .parametrize('batch_size, hidden_sizes', [(1, (1,)), (5, (3,)), (8, (4,)), (15, (1, 2)), (30, (3, 4, 10))])
    def test_get_actions_np(self, batch_size, hidden_sizes):
        # Same as test_get_actions but feeding numpy observations.
        env_spec = GarageEnv(DummyBoxEnv())
        obs_dim = env_spec.observation_space.flat_dim
        act_dim = env_spec.action_space.flat_dim
        obs = np.ones((batch_size, obs_dim), dtype=np.float32)
        init_std = 2.0
        policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
        dist = policy(torch.from_numpy(obs))[0]
        expected_mean = torch.full([batch_size, act_dim], (obs_dim * torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float)
        expected_variance = (init_std ** 2)
        (action, prob) = policy.get_actions(obs)
        assert np.array_equal(prob['mean'], expected_mean.numpy())
        assert dist.variance.equal(torch.full((batch_size, act_dim), expected_variance, dtype=torch.float))
        assert (action.shape == (batch_size, act_dim))
    .parametrize('batch_size, hidden_sizes', [(1, (1,)), (6, (3,)), (11, (6,)), (25, (3, 5)), (34, (2, 10, 11))])
    def test_is_pickleable(self, batch_size, hidden_sizes):
        # A pickle round-trip must preserve the policy's output distribution.
        env_spec = GarageEnv(DummyBoxEnv())
        obs_dim = env_spec.observation_space.flat_dim
        obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)
        init_std = 2.0
        policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
        (output1_action, output1_prob) = policy.get_actions(obs)
        p = pickle.dumps(policy)
        policy_pickled = pickle.loads(p)
        (output2_action, output2_prob) = policy_pickled.get_actions(obs)
        assert np.array_equal(output1_prob['mean'], output2_prob['mean'])
        assert (output1_action.shape == output2_action.shape)
    def test_get_action_dict_space(self):
        # Box-typed dict env: actions must match the env's action shape for
        # both single and batched observations.
        env = GarageEnv(DummyDictEnv(obs_space_type='box', act_space_type='box'))
        policy = GaussianMLPPolicy(env_spec=env.spec, hidden_nonlinearity=None, hidden_sizes=(1,), hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
        obs = env.reset()
        (action, _) = policy.get_action(obs)
        assert (env.action_space.shape == action.shape)
        (actions, _) = policy.get_actions(np.array([obs, obs]))
        for action in actions:
            assert (env.action_space.shape == action.shape)
        (actions, _) = policy.get_actions(np.array([obs, obs]))
        for action in actions:
            assert (env.action_space.shape == action.shape)
class PartialConversionElement(SageObject):
    """Wrapper for a raw element that could only partially be converted into
    a growth group; split() separates the convertible part from the rest."""
    def __init__(self, growth_group, raw_element):
        # Target group of the conversion.
        self.growth_group = growth_group
        # The not-yet-converted raw data.
        self.raw_element = raw_element
    def _repr_(self):
        from sage.structure.element import parent
        return 'element with parameter {} ({}) in {}'.format(self.raw_element, parent(self.raw_element), self.growth_group)
    def split(self):
        """Split off the part of raw_element the group can represent.

        Returns (here, other): a proper group element and a new
        PartialConversionElement wrapping the remainder.
        """
        (raw_here, raw_other) = self.growth_group._split_raw_element_(self.raw_element)
        try:
            here = self.growth_group.element_class(self.growth_group, raw_here)
        except PartialConversionValueError as e:
            from .misc import combine_exceptions
            raise combine_exceptions(ValueError('cannot split {}'.format(self)), e)
        other = PartialConversionElement(self.growth_group, raw_other)
        return (here, other)
    def is_compatible(self, other):
        # Delegates compatibility checking to the growth group.
        return self.growth_group.is_compatible(other)
def parse_option():
    """Parse command-line options for contrastive training and derive
    dependent settings (model name, warmup schedule, save folder).

    Returns:
        argparse.Namespace: options with derived fields attached
        (``model_path``, ``model_name``, ``save_folder``, parsed
        ``lr_decay_epochs`` list, warmup parameters when enabled).

    Side effects: creates ``opt.save_folder`` on disk if missing.
    """
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
    parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
    parser.add_argument('--batch_size', type=int, default=128, help='batch_size')
    parser.add_argument('--num_workers', type=int, default=16, help='num of workers to use')
    parser.add_argument('--epochs', type=int, default=100, help='number of training epochs')
    parser.add_argument('--learning_rate', type=float, default=0.1, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default='700,800,900', help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    parser.add_argument('--model', type=str, default='resnet18')
    parser.add_argument('--dataset', type=str, default='celeba', choices=['celeba', 'utkface'], help='dataset')
    parser.add_argument('--data_folder', type=str, default=None, help='path to dataset')
    parser.add_argument('--mean', type=str, help='mean of dataset in path in form of str tuple')
    parser.add_argument('--std', type=str, help='std of dataset in path in form of str tuple')
    parser.add_argument('--size', type=int, default=128, help='parameter for RandomResizedCrop')
    parser.add_argument('--name', type=str, default='', help='saved filename')
    parser.add_argument('--ckpt', type=str, default='', help='path to pre-trained model')
    parser.add_argument('--method', type=str, default='FSCL', choices=['FSCL', 'FSCL*', 'SupCon', 'SimCLR'], help='choose method')
    parser.add_argument('--group_norm', type=int, default=0, help='group normalization')
    parser.add_argument('--temp', type=float, default=0.1, help='temperature for loss function')
    parser.add_argument('--cosine', action='store_true', help='using cosine annealing')
    parser.add_argument('--syncBN', action='store_true', help='using synchronized batch normalization')
    parser.add_argument('--warm', action='store_true', help='warm-up for large batch training')
    parser.add_argument('--trial', type=str, default='0', help='id for recording multiple runs')
    parser.add_argument('--target_attribute_1', type=str, default='', help='target attribute')
    parser.add_argument('--target_attribute_2', type=str, default='None', help='target attribute')
    parser.add_argument('--sensitive_attribute_1', type=str, default='', help='sensitive_attribute')
    parser.add_argument('--sensitive_attribute_2', type=str, default='None', help='sensitive_attribute')
    opt = parser.parse_args()

    if opt.data_folder is None:
        opt.data_folder = './datasets/'
    opt.model_path = './save/FairSupCon/{}_models'.format(opt.dataset)

    # '700,800,900' -> [700, 800, 900] (was a list([]) + append loop).
    opt.lr_decay_epochs = [int(it) for it in opt.lr_decay_epochs.split(',')]

    opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.format(
        opt.method, opt.dataset, opt.model, opt.learning_rate,
        opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
    if opt.cosine:
        opt.model_name = '{}_cosine'.format(opt.model_name)

    # Large batches always warm up, regardless of the --warm flag.
    if opt.batch_size > 256:
        opt.warm = True
    if opt.warm:
        opt.model_name = '{}_warm'.format(opt.model_name)
        opt.warmup_from = 0.01
        opt.warm_epochs = 10
        if opt.cosine:
            # Warm up towards the cosine schedule's value at warm_epochs.
            eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
            opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
                1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
        else:
            opt.warmup_to = opt.learning_rate

    opt.save_folder = os.path.join(opt.model_path, opt.model_name, opt.name)
    if not os.path.isdir(opt.save_folder):
        os.makedirs(opt.save_folder)
    return opt
def test_columnar_convert_ndarray():
    """ColumnarConverter should pass NumPy arrays through without copying
    and reject arrays it cannot convert."""
    converter = ColumnarConverter('some_name', 'foo', None,
                                  column_defaults={}, selected_columns={},
                                  transform_columns={})
    first = np.random.rand(3, 4, 5)
    second = np.random.rand(6, 7)

    # Single unnamed array: ids enumerate the leading axis.
    ids, columns, type_info = converter.convert(first)
    assert ids == range(3)
    assert columns == {}
    _check_type_info(type_info, [('foo', first)])
    assert type_info[0][1] is first  # same object, no copy

    # Dict of arrays: ids are the concatenated per-array ranges.
    ids, columns, type_info = converter.convert({'a': first, 'b': second})
    np.testing.assert_array_equal(ids, [*range(3), *range(6)])
    assert columns == {}
    _check_type_info(type_info, [('a', first), ('b', second)])
    assert type_info[0][1] is first
    assert type_info[1][1] is second

    with pytest.raises(ValueError,
                       match="some_name\\['foo'\\]: could not convert NumPy array"):
        converter.convert(np.zeros(123))
def osnet_avgpool(num_classes=1000, loss='softmax', **kwargs):
    """Build the standard OSNet configuration with average pooling."""
    return OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
        loss=loss,
        pool='avg',
        **kwargs,
    )
def stateDmodel(init_type='normal', init_gain=0.02, gpu_id='cuda:0'):
    """Construct a StateD network and initialise its weights on the GPU."""
    model = StateD()
    return init_net(model, init_type, init_gain, gpu_id)
class FrozenBatchNorm2d(nn.Module):
    """BatchNorm2d with affine parameters and running statistics frozen.

    All four quantities are registered as buffers, so optimizers never
    update them; the forward pass is a fixed per-channel affine map.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        # Regular BatchNorm checkpoints carry a tracking counter that this
        # frozen variant does not have; drop it before loading.
        tracked_key = prefix + 'num_batches_tracked'
        if tracked_key in state_dict:
            del state_dict[tracked_key]
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys,
            unexpected_keys, error_msgs)

    def forward(self, x):
        # Fold the frozen statistics into one scale/shift pair so the
        # whole layer is a single fused multiply-add per channel.
        eps = 1e-05
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        var = self.running_var.reshape(1, -1, 1, 1)
        mean = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (var + eps).rsqrt()
        shift = b - mean * scale
        return x * scale + shift
def NLR_analysis(NLR):
    """Classify a negative likelihood ratio into a diagnostic band.

    Returns:
        str: 'Good' (< 0.1), 'Fair' ([0.1, 0.2)), 'Poor' ([0.2, 0.5)),
        'Negligible' (>= 0.5), or the string 'None' when the value is not
        comparable to a float (e.g. ``NLR`` is None).
    """
    try:
        # Guard-clause chain: each earlier return already excludes the
        # lower bound, so the redundant compound conditions are gone.
        if NLR < 0.1:
            return 'Good'
        if NLR < 0.2:
            return 'Fair'
        if NLR < 0.5:
            return 'Poor'
        return 'Negligible'
    except TypeError:
        # Narrowed from a blanket `except Exception`: only an
        # uncomparable input (None, str, ...) can raise here.
        return 'None'
def _batch_shuffle(index_array, batch_size):
batch_count = int((len(index_array) / batch_size))
last_batch = index_array[(batch_count * batch_size):]
index_array = index_array[:(batch_count * batch_size)]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch) |
@pytest.fixture(scope='module')
def basic_multilingual():
    """Module-scoped pipeline running only language identification.

    BUG FIX: the decorator had degraded to a bare ``(scope='module')``,
    which is a syntax error; restored ``@pytest.fixture``.
    """
    return Pipeline(dir=TEST_MODELS_DIR, lang='multilingual', processors='langid')
class ESPNet(nn.Module):
    """3-D ESPNet-style encoder-decoder segmentation network.

    Encoder: three levels of dilated parallel residual blocks with
    stride-2 downsampling between levels. Decoder: learned upsampling
    with skip connections from the matching encoder level, followed by a
    multi-scale PSP pooling head and a classifier.

    Args:
        classes: number of output segmentation classes.
        channels: number of input channels.
    """

    def __init__(self, classes=4, channels=1):
        super().__init__()
        self.input1 = InputProjectionA(1)
        self.input2 = InputProjectionA(1)
        initial = 16
        config = [32, 128, 256, 256]
        reps = [2, 2, 3]
        # Stem: stride-2 7x7x7 conv from the input channels.
        self.level0 = CBR(channels, initial, 7, 2)
        # Encoder level 1.
        self.level1 = nn.ModuleList()
        for i in range(reps[0]):
            in_ch = initial if i == 0 else config[0]
            self.level1.append(DilatedParllelResidualBlockB1(in_ch, config[0]))
        # Encoder level 2: downsample then residual reps.
        self.level2 = DilatedParllelResidualBlockB1(config[0], config[1], stride=2)
        self.level_2 = nn.ModuleList()
        for i in range(reps[1]):
            self.level_2.append(DilatedParllelResidualBlockB1(config[1], config[1]))
        # Encoder level 3: downsample then residual reps.
        self.level3_0 = DilatedParllelResidualBlockB1(config[1], config[2], stride=2)
        self.level_3 = nn.ModuleList()
        for i in range(reps[2]):
            self.level_3.append(DilatedParllelResidualBlockB1(config[2], config[2]))
        # Decoder: level 3 -> level 2 (skip connection doubles channels).
        self.up_l3_l2 = UpSampler(config[2], config[1])
        self.merge_l2 = DilatedParllelResidualBlockB1(2 * config[1], config[1])
        self.dec_l2 = nn.ModuleList()
        for i in range(reps[0]):
            self.dec_l2.append(DilatedParllelResidualBlockB1(config[1], config[1]))
        # Decoder: level 2 -> level 1.
        self.up_l2_l1 = UpSampler(config[1], config[0])
        self.merge_l1 = DilatedParllelResidualBlockB1(2 * config[0], config[0])
        self.dec_l1 = nn.ModuleList()
        for i in range(reps[0]):
            self.dec_l1.append(DilatedParllelResidualBlockB1(config[0], config[0]))
        self.dec_l1.append(CBR(config[0], classes, 3, 1))
        self.dec_l1.append(ASPBlock(classes, classes))
        # Multi-scale pyramid pooling head.
        self.pspModules = nn.ModuleList()
        scales = [0.2, 0.4, 0.6, 0.8]
        for sc in scales:
            self.pspModules.append(PSPDec(classes, classes, sc))
        # BUG FIX: the original had a doubled assignment
        # (`self.classifier = self.classifier = nn.Sequential(...)`).
        self.classifier = nn.Sequential(
            CBR((len(scales) + 1) * classes, classes, 3, 1),
            ASPBlock(classes, classes),
            nn.Upsample(scale_factor=2),
            CBR(classes, classes, 7, 1),
            C(classes, classes, 1, 1))
        # He-style init for (transposed) convolutions; unit batch norm.
        # (Both conv branches ran identical code; merged into one test.)
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, nn.ConvTranspose3d)):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, input1, inp_res=(128, 128, 128), inpSt2=False):
        """Segment ``input1``; returns logits upsampled to the input size.

        Args:
            input1: 5-D tensor (N, C, D, H, W).
            inp_res: working resolution; during training (or when None)
                it is recomputed as the input size rounded up to a
                multiple of 8 so the three stride-2 stages divide evenly.
            inpSt2: unused here; kept for interface compatibility.
        """
        dim0 = input1.size(2)
        dim1 = input1.size(3)
        dim2 = input1.size(4)
        if self.training or (inp_res is None):
            inp_res = (math.ceil(dim0 / 8) * 8,
                       math.ceil(dim1 / 8) * 8,
                       math.ceil(dim2 / 8) * 8)
        if inp_res:
            input1 = F.adaptive_avg_pool3d(input1, output_size=inp_res)
        out_l0 = self.level0(input1)
        out_l1 = out_l0
        for layer in self.level1:
            out_l1 = layer(out_l1)
        out_l2 = self.level2(out_l1)
        for layer in self.level_2:
            out_l2 = layer(out_l2)
        out_l3 = self.level3_0(out_l2)
        for layer in self.level_3:
            out_l3 = layer(out_l3)
        # Decode with skip connections from the matching encoder level.
        dec_l2 = self.merge_l2(torch.cat([self.up_l3_l2(out_l3), out_l2], 1))
        for layer in self.dec_l2:
            dec_l2 = layer(dec_l2)
        dec_l1 = self.merge_l1(torch.cat([self.up_l2_l1(dec_l2), out_l1], 1))
        for layer in self.dec_l1:
            dec_l1 = layer(dec_l1)
        # Pyramid pooling: concatenate the decoder output with each scale.
        psp_outs = dec_l1.clone()
        for layer in self.pspModules:
            psp_outs = torch.cat([psp_outs, layer(dec_l1)], 1)
        decoded = self.classifier(psp_outs)
        # F.upsample is deprecated; F.interpolate is its exact replacement.
        return F.interpolate(decoded, size=(dim0, dim1, dim2), mode='trilinear')
_properties
class LibraryNode(CodeNode):
    """High-level SDFG node that expands into an implementation-specific
    subgraph via a registered ExpandTransformation.

    NOTE(review): ``from_json`` and ``register_implementation`` take
    ``cls`` as their first argument but carry no ``@classmethod``
    decorator in this chunk — presumably applied (or lost) elsewhere;
    confirm before calling them through the class.
    """

    # Declarative properties, serialized to/from JSON.
    name = Property(dtype=str, desc='Name of node')
    implementation = LibraryImplementationProperty(dtype=str, allow_none=True, desc='Which implementation this library node will expand into.Must match a key in the list of possible implementations.')
    schedule = EnumProperty(dtype=dtypes.ScheduleType, desc='If set, determines the default device mapping of the node upon expansion, if expanded to a nested SDFG.', default=dtypes.ScheduleType.Default)
    debuginfo = DebugInfoProperty()

    def __init__(self, name, *args, schedule=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.name = name
        self.label = name
        self.schedule = (schedule or dtypes.ScheduleType.Default)

    def __jsontype__(self):
        # All subclasses serialize under one JSON type tag; the concrete
        # class is recovered from the 'classpath' field in from_json.
        return 'LibraryNode'

    def has_side_effects(self) -> bool:
        return False

    def to_json(self, parent):
        jsonobj = super().to_json(parent)
        # Record the fully-qualified class name so deserialization can
        # locate the concrete subclass.
        jsonobj['classpath'] = full_class_path(self)
        return jsonobj

    def from_json(cls, json_obj, context=None):
        """Deserialize a library node; when invoked on the LibraryNode
        base, dispatch to the concrete subclass named in 'classpath'."""
        if (cls == LibraryNode):
            clazz = pydoc.locate(json_obj['classpath'])
            if (clazz is None):
                warnings.warn(f"""Could not find class "{json_obj['classpath']}" while deserializing. Falling back to UnregisteredLibraryNode.""")
                return UnregisteredLibraryNode.from_json(json_obj, context)
            return clazz.from_json(json_obj, context)
        else:
            # Concrete subclass: bypass __init__ and restore properties
            # directly from the JSON object.
            ret = cls.__new__(cls)
            dace.serialize.set_properties_from_json(ret, json_obj, context=context)
            return ret

    def expand(self, sdfg, state, *args, **kwargs) -> str:
        """Expand this node in-place and return the implementation used.

        Resolution order: config override (when enabled and known) >
        explicitly set ``self.implementation`` > class default > library
        default > config default; raises if none is available or the
        chosen name is unknown.
        """
        from dace.transformation.transformation import ExpandTransformation
        implementation = self.implementation
        library_name = getattr(type(self), '_dace_library_name', '')
        try:
            if library_name:
                config_implementation = Config.get('library', library_name, 'default_implementation')
            else:
                config_implementation = None
        except KeyError:
            # Library not present in the configuration file.
            config_implementation = None
        if (config_implementation is not None):
            try:
                config_override = Config.get('library', library_name, 'override')
                if (config_override and (implementation in self.implementations)):
                    if (implementation is not None):
                        warnings.warn('Overriding explicitly specified implementation {} for {} with {}.'.format(implementation, self.label, config_implementation))
                    implementation = config_implementation
            except KeyError:
                config_override = False
        if (implementation is None):
            implementation = type(self).default_implementation
        if (implementation is None):
            import dace.library
            lib = dace.library._DACE_REGISTERED_LIBRARIES[type(self)._dace_library_name]
            implementation = lib.default_implementation
        if (implementation is None):
            implementation = config_implementation
        if (implementation is None):
            raise ValueError('No implementation or default implementation specified.')
        if (implementation not in self.implementations.keys()):
            raise KeyError('Unknown implementation for node {}: {}'.format(type(self).__name__, implementation))
        # Instantiate and apply the expansion transformation on this node.
        transformation_type = type(self).implementations[implementation]
        sdfg_id = sdfg.sdfg_id
        state_id = sdfg.nodes().index(state)
        subgraph = {transformation_type._match_node: state.node_id(self)}
        transformation: ExpandTransformation = transformation_type()
        transformation.setup_match(sdfg, sdfg_id, state_id, subgraph, 0)
        if (not transformation.can_be_applied(state, 0, sdfg)):
            raise RuntimeError('Library node expansion applicability check failed.')
        sdfg.append_transformation(transformation)
        transformation.apply(state, sdfg, *args, **kwargs)
        return implementation

    def register_implementation(cls, name, transformation_type):
        """Register an ExpandTransformation under ``name`` for this class."""
        cls.implementations[name] = transformation_type
        transformation_type._match_node = cls

    def free_symbols(self) -> Set[str]:
        # Symbols of the node plus those of any symbolic property values.
        # NOTE(review): super's free_symbols is accessed without a call —
        # presumably a @property upstream; verify outside this chunk.
        fsyms = super(LibraryNode, self).free_symbols
        for (p, v) in self.properties():
            if (isinstance(p, SymbolicProperty) and issymbolic(v)):
                fsyms.update((str(s) for s in v.free_symbols))
        return fsyms
def _count_ji(state: GameState, color: int, size: int):
    """Count the empty points surrounded only by ``color`` on a
    ``size`` x ``size`` board (territory / "ji" in Go — inferred from the
    name; confirm against the caller).

    Builds a signed board (+1 own stones, -1 opponent, 0 empty), then
    repeatedly marks empty points adjacent to a -1 point as -1 until a
    fixed point; the remaining zeros touch only ``color`` stones or the
    board edge and are counted.
    """
    # Sign of chain_id_board * color picks own (+) vs opponent (-) stones.
    board = jnp.zeros_like(state.chain_id_board)
    board = jnp.where(((state.chain_id_board * color) > 0), 1, board)
    board = jnp.where(((state.chain_id_board * color) < 0), (- 1), board)
    # (size^2, 4) neighbour index table; -1 entries mark off-board slots
    # (established by the `neighbours != -1` mask below).
    neighbours = _neighbours(size)
    def is_opp_neighbours(b):
        # Empty points having at least one on-board neighbour marked -1.
        return ((b == 0) & ((b[neighbours.flatten()] == (- 1)).reshape((size ** 2), 4) & (neighbours != (- 1))).any(axis=1))
    def fill_opp(x):
        (b, _) = x
        mask = is_opp_neighbours(b)
        # One flood-fill step; the second tuple element is the loop
        # condition: keep going while anything changed.
        return (jnp.where(mask, (- 1), b), mask.any())
    (b, _) = jax.lax.while_loop((lambda x: x[1]), fill_opp, (board, True))
    return (b == 0).sum()
class MultiTaskLoss(MultiTaskMetric):
    """Metric wrapping a torch loss function over flattened predictions,
    coercing dtypes to whatever the underlying loss expects."""

    def __init__(self, loss_fn, name=None):
        self.loss_fn = loss_fn
        super().__init__(name=(name if name is not None else 'loss'))

    def _compute_flattened(self, flattened_y_pred, flattened_y_true):
        # BCE wants float tensors on both sides; cross-entropy wants
        # integer class targets.
        if isinstance(self.loss_fn, torch.nn.BCEWithLogitsLoss):
            flattened_y_pred = flattened_y_pred.float()
            flattened_y_true = flattened_y_true.float()
        elif isinstance(self.loss_fn, torch.nn.CrossEntropyLoss):
            flattened_y_true = flattened_y_true.long()
        return self.loss_fn(flattened_y_pred, flattened_y_true)

    def worst(self, metrics):
        # For a loss, larger is worse.
        return maximum(metrics)
def cauchy_naive(v, z, w, conj=True):
    """Directly evaluate the Cauchy kernel sum_j v_j / (z - w_j).

    Shapes (from the unsqueeze pattern): ``v`` and ``w`` share a trailing
    pole axis N, ``z`` has a trailing evaluation axis L; the result has
    shape (..., L). With ``conj`` the conjugate pairs of ``v`` and ``w``
    are appended first.
    """
    if conj:
        v = _conj(v)
        w = _conj(w)
    # Broadcast to (..., N, L) and sum out the pole axis.
    denominator = z.unsqueeze(-2) - w.unsqueeze(-1)
    kernel = v.unsqueeze(-1) / denominator
    return kernel.sum(dim=-2)
class FashionMNIST(torchvision.datasets.FashionMNIST):
    """FashionMNIST wrapper that optionally exposes the 10-way label and
    its empirical class frequencies, depending on ``labeled_factors``."""

    def __init__(self, root, part, labeled_factors, transform):
        super().__init__(root, (part == 'train'), transform=transform, download=True)
        self.has_label = len(labeled_factors) > 0
        if self.has_label:
            self.nclass = [10]
            counts = self.targets.bincount(minlength=10)
            self.class_freq = [counts.float() / self.data.size(0)]
        else:
            # Unlabeled mode: no class metadata at all.
            self.nclass = []
            self.class_freq = []

    def __getitem__(self, k):
        img, target = super().__getitem__(k)
        if not self.has_label:
            return img
        return (img, torch.tensor([target]))
class Broadcast(Function):
    """Autograd function that broadcasts tensors to several GPUs; the
    backward pass reduce-adds gradients back onto the source device.

    NOTE(review): ``forward``/``backward`` carry no @staticmethod
    decorator in this chunk — modern torch.autograd.Function requires
    one; presumably applied (or lost) outside this view.
    """

    def forward(ctx, target_gpus, *inputs):
        assert all(map((lambda i: (i.device.type != 'cpu')), inputs)), 'Broadcast function not implemented for CPU tensors'
        target_gpus = list(map((lambda x: _get_device_index(x, True)), target_gpus))
        ctx.target_gpus = target_gpus
        if (len(inputs) == 0):
            return tuple()
        # Stashed for the backward reduce-add.
        ctx.num_inputs = len(inputs)
        ctx.input_device = inputs[0].get_device()
        # One list of broadcast copies per target GPU.
        outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
        non_differentiables = []
        # needs_input_grad[0] corresponds to target_gpus; the actual
        # tensor inputs start at index 1.
        for (idx, input_requires_grad) in enumerate(ctx.needs_input_grad[1:]):
            if (not input_requires_grad):
                for output in outputs:
                    non_differentiables.append(output[idx])
        ctx.mark_non_differentiable(*non_differentiables)
        # Flatten the per-GPU lists into a single tuple of tensors.
        return tuple([t for tensors in outputs for t in tensors])

    def backward(ctx, *grad_outputs):
        # None for target_gpus (non-tensor arg); gradients of all copies
        # are reduce-added back onto the original input device.
        return ((None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs))
class RootVisitor(NodeVisitor):
    """Visits only the immediate children of a scope-introducing node,
    delegating symbol collection to a FrameSymbolVisitor."""

    def __init__(self, symbols):
        self.sym_visitor = FrameSymbolVisitor(symbols)

    def _simple_visit(self, node, **kwargs):
        # Default handling: analyse every direct child node.
        for child in node.iter_child_nodes():
            self.sym_visitor.visit(child)

    visit_Template = visit_Block = visit_Macro = visit_FilterBlock = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit

    def visit_AssignBlock(self, node, **kwargs):
        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    def visit_CallBlock(self, node, **kwargs):
        # The 'call' expression belongs to the outer scope; skip it.
        for child in node.iter_child_nodes(exclude=('call',)):
            self.sym_visitor.visit(child)

    def visit_OverlayScope(self, node, **kwargs):
        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    def visit_For(self, node, for_branch='body', **kwargs):
        # Only one branch of the loop is analysed per invocation.
        if for_branch == 'body':
            self.sym_visitor.visit(node.target, store_as_param=True)
            branch = node.body
        elif for_branch == 'else':
            branch = node.else_
        elif for_branch == 'test':
            self.sym_visitor.visit(node.target, store_as_param=True)
            if node.test is not None:
                self.sym_visitor.visit(node.test)
            return
        else:
            raise RuntimeError('Unknown for branch')
        for stmt in branch or ():
            self.sym_visitor.visit(stmt)

    def visit_With(self, node, **kwargs):
        for target in node.targets:
            self.sym_visitor.visit(target)
        for stmt in node.body:
            self.sym_visitor.visit(stmt)

    def generic_visit(self, node, *args, **kwargs):
        raise NotImplementedError('Cannot find symbols for %r' % node.__class__.__name__)
def simulator(theta, n_obs=None, scale=0.1, rng=None):
    """Draw Gaussian observations centred on ``theta``.

    Without ``n_obs`` a single draw with the shape of ``theta`` is
    returned; otherwise ``n_obs`` draws are made per parameter row and
    the result is laid out as (theta.shape[0], n_obs, theta.shape[1]).
    """
    if rng is None:
        rng = np.random.default_rng()
    if n_obs is None:
        return rng.normal(loc=theta, scale=scale)
    draws = rng.normal(loc=theta, scale=scale,
                       size=(n_obs, theta.shape[0], theta.shape[1]))
    # Move the observation axis between the two parameter axes.
    return np.transpose(draws, (1, 0, 2))
class RESetMPExample(RESetMapReduce):
    """Example map-reduce over words: children insert the next letter
    (= current length) at every position; each word maps to x^len."""

    def __init__(self, maxl=9):
        RESetMapReduce.__init__(self)
        from sage.rings.integer_ring import ZZ
        from sage.rings.polynomial.polynomial_ring import polygen
        self.x = polygen(ZZ, 'x')
        self.maxl = maxl  # maximum word length explored

    def roots(self):
        # The search starts from the single empty word.
        return [[]]

    def children(self, l):
        if len(l) >= self.maxl:
            return []
        letter = len(l)
        return [l[:pos] + [letter] + l[pos:] for pos in range(len(l) + 1)]

    def map_function(self, l):
        return self.x ** len(l)
def init_local_group(node_rank: int, num_gpus_per_node: int):
    """Create the process group spanning the GPUs of one node.

    Must be called at most once: the result is stored in the
    module-level singleton ``_LOCAL_PROCESS_GROUP``.
    """
    global _LOCAL_PROCESS_GROUP
    assert _LOCAL_PROCESS_GROUP is None
    start = node_rank * num_gpus_per_node
    local_ranks = list(range(start, start + num_gpus_per_node))
    _LOCAL_PROCESS_GROUP = torch_dist.new_group(local_ranks)
def main():
    """Evaluate PanoSalNet head-movement predictions for one
    dataset/topic: average Manhattan tile error and QoE over all users.

    Reads ``./meta.json`` for dataset geometry, loads per-user actual and
    predicted viewports from pickles, and prints aggregate results.
    """
    parser = argparse.ArgumentParser(description='Calculate QoE and error for PanoSalNet algorithm')
    parser.add_argument('-D', '--dataset', type=int, required=True, help='Dataset ID (1 or 2)')
    parser.add_argument('-T', '--topic', required=True, help='Topic in the particular Dataset (video name)')
    parser.add_argument('--fps', type=int, required=True, help='fps of the video')
    parser.add_argument('-Q', '--quality', required=True, help='Preferred bitrate quality of the video (360p, 480p, 720p, 1080p, 1440p)')
    args = parser.parse_args()
    if args.dataset not in (1, 2):
        print('Incorrect value of the Dataset ID provided!!...')
        print('======= EXIT ')
        exit()

    print('Reading JSON...')
    # Close the metadata file deterministically (was left open before).
    with open('./meta.json') as meta_file:
        jsonRead = json.load(meta_file)
    ds_meta = jsonRead['dataset'][args.dataset - 1]
    nusers = ds_meta['nusers']
    width = ds_meta['width']
    height = ds_meta['height']
    view_width = ds_meta['view_width']
    view_height = ds_meta['view_height']
    milisec = ds_meta['milisec']
    pref_bitrate = jsonRead['bitrates'][args.quality]
    ncol_tiles = jsonRead['ncol_tiles']
    nrow_tiles = jsonRead['nrow_tiles']
    player_width = jsonRead['player_width']
    player_height = jsonRead['player_height']
    player_tiles_x = math.ceil((player_width * ncol_tiles * 1.0) / width)
    player_tiles_y = math.ceil((player_height * nrow_tiles * 1.0) / height)

    PATH_ACT = '../../Viewport/ds{}/'.format(args.dataset)
    PATH_PRED = './head_prediction/ds{}/'.format(args.dataset)
    manhattan_error, x_mae, y_mae, final_qoe = [], [], [], []
    count_frames = 0

    for usernum in range(nusers):
        print('User_{}'.format(usernum))
        user_manhattan_error = 0.0
        # BUG FIX: the original referenced bare `dataset`/`topic`
        # (NameError) when formatting the actual-viewport path.
        act_path = PATH_ACT + 'viewport_ds{}_topic{}_user{}'.format(
            args.dataset, args.topic, usernum + 1)
        pred_path = PATH_PRED + 'topic{}_user{}'.format(args.topic, usernum)
        with open(act_path, 'rb') as f:
            viewport = pickle.load(f, encoding='latin1')
        with open(pred_path, 'rb') as f:
            p_viewport = pickle.load(f, encoding='latin1')

        frame_nos = []
        # BUG FIX: `milisec` comes from meta.json, not argparse; the
        # original passed the nonexistent `args.milisec`.
        act_viewport, frame_nos, max_frame = get_act_tiles(
            viewport, frame_nos, args.fps, milisec, width, height,
            view_width, view_height)

        # Most likely tile per predicted frame (argmax of the saliency map).
        pred_max_viewport = []
        for fr in range(len(p_viewport)):
            prob = p_viewport[fr]
            argmax = np.where(prob == prob.max())
            pred_max_viewport.append((argmax[0][0], argmax[1][0]))

        # Trim actual/predicted sequences to a common length.
        pred_viewport = p_viewport
        act_viewport = act_viewport[:len(pred_viewport)]
        pred_viewport = pred_viewport[:len(act_viewport)]
        frame_nos = frame_nos[:len(pred_viewport)]

        for fr in range(len(pred_max_viewport)):
            act_tile = act_viewport[fr]
            pred_tile = pred_max_viewport[fr]
            tile_row_dif = act_tile[0] - pred_tile[0]
            # Column distance wraps around the 360-degree panorama.
            if act_tile[1] < pred_tile[1]:
                tile_col_dif = min(pred_tile[1] - act_tile[1],
                                   (act_tile[1] + ncol_tiles) - pred_tile[1])
            else:
                tile_col_dif = min(act_tile[1] - pred_tile[1],
                                   (ncol_tiles + pred_tile[1]) - act_tile[1])
            user_manhattan_error += abs(tile_row_dif) + abs(tile_col_dif)
        manhattan_error.append(user_manhattan_error / len(pred_max_viewport))
        count_frames += len(act_viewport)

        act_tiles, pred_tiles, chunk_frames = get_chunks(
            act_viewport, pred_viewport, frame_nos, max_frame, args.fps)
        vid_bitrate = alloc_bitrate(pred_tiles, chunk_frames, nrow_tiles,
                                    ncol_tiles, pref_bitrate)
        q = calc_qoe(vid_bitrate, act_tiles, frame_nos, chunk_frames,
                     width, height, nrow_tiles, ncol_tiles,
                     player_width, player_height)
        final_qoe.append(q)

    avg_qoe = np.mean(final_qoe)
    avg_manhattan_error = np.mean(manhattan_error)
    print('\n======= RESULTS ')
    print('PanoSalNet')
    print('Dataset: {}'.format(args.dataset))
    print('Topic: ' + args.topic)
    print('Pred_nframe: {}'.format(args.fps))
    print('Avg. QoE: {}'.format(avg_qoe))
    print('Avg. Manhattan error: {}'.format(avg_manhattan_error))
    print('Count: {}'.format(count_frames))
    print('\n\n')
class Dataset():
    """TFRecord-backed image classification input pipeline (TF1 API).

    Builds a shuffled, batched, repeated tf.data pipeline from
    ``<dataset_path>/*.tfrec`` files, with one-hot labels looked up from
    a newline-separated class-label file.
    """

    # IMPROVEMENT: these helpers take no `self` and were invoked through
    # the class; they are now proper @staticmethods.
    @staticmethod
    def preprocess_image(img):
        """Scale pixel values from [0, 255] into [0, 1]."""
        img /= 255.0
        return img

    @staticmethod
    def get_class_labels(file_path):
        """Read newline-separated class labels as a string tensor,
        dropping the empty trailing entry produced by the final newline."""
        line_seperator = '\n'
        file_contents = tf.io.read_file(file_path)
        file_contents = tf.expand_dims(file_contents, axis=-1)
        class_labels = tf.strings.split(file_contents, sep=line_seperator)
        return class_labels.values[:-1]

    def __init__(self, cl_path, dataset_path, image_size, batch_size, shuffle=True):
        # Plain attribute assignments (was an obfuscated list unpack).
        self.dataset_path = dataset_path
        self.image_size = image_size
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.class_labels = Dataset.get_class_labels(cl_path)
        # TF1: evaluate the label-count tensor eagerly via a session.
        with tf.Session() as sess:
            self.num_classes = sess.run(tf.shape(self.class_labels)[0])
        self.data = self.get_dataset()

    def get_image_and_class(self, image, classl):
        """Decode/resize one JPEG and one-hot encode its class string."""
        classl = tf.math.equal(self.class_labels, classl)
        classl = tf.cast(classl, tf.int32)
        classl = tf.argmax(classl, axis=-1)
        classl = tf.one_hot(classl, self.num_classes)
        image = tf.image.decode_jpeg(image, channels=3)
        image = tf.image.resize_image_with_pad(image, self.image_size[0], self.image_size[1])
        image = tf.cast(image, tf.float32)
        image = Dataset.preprocess_image(image)
        return (image, classl)

    def read_tfrec(self, example):
        """Parse one serialized tf.Example into (image, one_hot_label)."""
        feature = {'image': tf.io.FixedLenFeature([], tf.string),
                   'class': tf.io.FixedLenFeature([], tf.string)}
        example = tf.parse_single_example(example, feature)
        return self.get_image_and_class(example['image'], example['class'])

    def get_dataset(self):
        """Assemble the interleaved, shuffled, batched input pipeline."""
        cycle_length = 32
        prefetch_size = 1
        option = tf.data.Options()
        # Allow non-deterministic interleave ordering for throughput.
        option.experimental_deterministic = False
        ds = tf.data.Dataset.list_files(self.dataset_path + '/*.tfrec')
        ds = ds.with_options(option)
        ds = ds.interleave(tf.data.TFRecordDataset, cycle_length=cycle_length,
                           num_parallel_calls=tf.data.experimental.AUTOTUNE)
        ds = ds.map(self.read_tfrec, tf.data.experimental.AUTOTUNE)
        if self.shuffle:
            ds = ds.shuffle(SHUFFLE_BUFFER)
        ds = ds.repeat()
        ds = ds.batch(self.batch_size, drop_remainder=True)
        return ds.prefetch(prefetch_size)
class _DistributedRequest(object):
def __init__(self, request):
self.request = request
def is_completed(self):
return torch._C._dist_request_is_completed(self.request)
def wait(self):
torch._C._dist_request_wait(self.request) |
def __generate_host_torrc(host_path, torrc_defaults):
    """Write the per-host torrc and torrc-defaults files under
    ``host_path``, pulling include paths and bandwidth limits from
    ``torrc_defaults``."""
    with open(f'{host_path}/{TORRC_HOST_FILENAME}', 'w') as outf:
        outf.write('# Enter any host-specific tor config options here.\n')
        outf.write(f'# Note that any option specified here may override a default from {TORRC_DEFAULTS_HOST_FILENAME}.\n')
    with open(f'{host_path}/{TORRC_DEFAULTS_HOST_FILENAME}', 'w') as outf:
        outf.write('# The following files specify default tor config options for this host.\n')
        outf.write(f'%include {get_host_rel_conf_path(TORRC_COMMON_FILENAME)}\n')
        for fname in torrc_defaults['includes']:
            outf.write(f'%include {get_host_rel_conf_path(fname)}\n')
        # Bandwidth limits are optional per-host settings.
        if 'bandwidth_rate' in torrc_defaults:
            outf.write(f"BandwidthRate {torrc_defaults['bandwidth_rate']}\n")
        if 'bandwidth_burst' in torrc_defaults:
            outf.write(f"BandwidthBurst {torrc_defaults['bandwidth_burst']}\n")
def pddl_to_sas(task):
    """Translate a PDDL task into a SAS+ task.

    Pipeline: ground (instantiate) reachable atoms/actions/axioms,
    compute mutex fact groups, build the STRIPS->SAS variable
    dictionaries, translate, then optionally simplify and
    reorder/filter variables. Early-outs return trivially unsolvable
    (or solvable) SAS tasks.
    """
    with timers.timing('Instantiating', block=True):
        (relaxed_reachable, atoms, actions, goal_list, axioms, reachable_action_params) = instantiate.explore(task)
    if (not relaxed_reachable):
        # Even the delete relaxation has no plan => unsolvable.
        return unsolvable_sas_task('No relaxed solution')
    elif (goal_list is None):
        return unsolvable_sas_task('Trivially false goal')
    for item in goal_list:
        assert isinstance(item, pddl.Literal)
    with timers.timing('Computing fact groups', block=True):
        (groups, mutex_groups, translation_key) = fact_groups.compute_groups(task, atoms, reachable_action_params)
    with timers.timing('Building STRIPS to SAS dictionary'):
        (ranges, strips_to_sas) = strips_to_sas_dictionary(groups, assert_partial=options.use_partial_encoding)
    with timers.timing('Building dictionary for full mutex groups'):
        (mutex_ranges, mutex_dict) = strips_to_sas_dictionary(mutex_groups, assert_partial=False)
    if options.add_implied_preconditions:
        with timers.timing('Building implied facts dictionary...'):
            implied_facts = build_implied_facts(strips_to_sas, groups, mutex_groups)
    else:
        implied_facts = {}
    with timers.timing('Building mutex information', block=True):
        if options.use_partial_encoding:
            mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
        else:
            # Full encoding already captures this information.
            print('using full encoding: between-variable mutex information skipped.')
            mutex_key = []
    with timers.timing('Translating task', block=True):
        sas_task = translate_task(strips_to_sas, ranges, translation_key, mutex_dict, mutex_ranges, mutex_key, task.init, goal_list, actions, axioms, task.use_min_cost_metric, implied_facts)
    # NOTE(review): these counters are module-level globals, presumably
    # incremented inside translate_task (not visible in this chunk).
    print(('%d effect conditions simplified' % simplified_effect_condition_counter))
    print(('%d implied preconditions added' % added_implied_precondition_counter))
    if options.filter_unreachable_facts:
        with timers.timing('Detecting unreachable propositions', block=True):
            try:
                simplify.filter_unreachable_propositions(sas_task)
            except simplify.Impossible:
                return unsolvable_sas_task('Simplified to trivially false goal')
            except simplify.TriviallySolvable:
                return solvable_sas_task('Simplified to empty goal')
    if (options.reorder_variables or options.filter_unimportant_vars):
        with timers.timing('Reordering and filtering variables', block=True):
            variable_order.find_and_apply_variable_order(sas_task, options.reorder_variables, options.filter_unimportant_vars)
    return sas_task
def separate_pipeline(pipeline_path):
    """Extract the fusion network out of a full pipeline checkpoint.

    If the checkpoint at ``pipeline_path`` contains a ``_fusion_network``
    submodule, the original checkpoint is archived under a ``pipeline/``
    subdirectory and ``pipeline_path`` is rewritten to hold only the
    fusion-network weights (prefix stripped, ``pipeline_state`` removed).
    No-op otherwise.
    """
    checkpoint = torch.load(pipeline_path, map_location=torch.device('cpu'))
    # Top-level submodule names present in the state dict.
    main_keys = {k.split('.')[0] for k in checkpoint['model_state'].keys()}
    # Idiomatic boolean assignment (was an if/else around True/False).
    separate = '_fusion_network' in main_keys
    if separate:
        outdir, file = os.path.split(pipeline_path)
        path = os.path.join(outdir, 'pipeline', file)
        if not os.path.exists(os.path.dirname(path)):
            os.mkdir(os.path.dirname(path))
        # Keep the full pipeline checkpoint as a backup copy.
        torch.save(checkpoint, path)
        model_state = checkpoint['model_state']
        # Keep only fusion-network entries, with the prefix stripped.
        model_state = {k.replace('_fusion_network.', ''): v
                       for k, v in model_state.items()
                       if k.startswith('_fusion_network.')}
        checkpoint['model_state'] = model_state
        checkpoint.pop('pipeline_state', None)
        torch.save(checkpoint, pipeline_path)
        print('Extracted fusion model from pipeline and saved separately.')
class LocalDataset(BaseDataset):
    """Dataset over every image found recursively under ``cfg.DATA_DIR``;
    extensions come from ``cfg.EXT`` (comma-separated, defaults to
    '.png,.jpg,.jpeg')."""

    def __init__(self, cfg):
        super().__init__(cfg)
        self.cfg = cfg
        data_dir = cfg.DATA_DIR
        ext = getattr(cfg, 'EXT', '.png,.jpg,.jpeg')
        self.images = []
        for suffix in ext.split(','):
            pattern = os.path.join(data_dir, '**', f'*{suffix}')
            self.images.extend(glob.glob(pattern, recursive=True))

    def __getitem__(self, index):
        image_path = self.images[index]
        image = Image.open(image_path).convert('RGB')
        augmented = self.augmentations(image=image)
        output_dict = {'filename': image_path}
        for key, value in augmented.items():
            tensor = torch.tensor(np.asarray(value))
            # Give 2-D outputs (e.g. masks) an explicit channel axis.
            if tensor.ndim == 2:
                tensor = tensor.unsqueeze(0)
            output_dict[key] = tensor
        return output_dict

    def __len__(self):
        return len(self.images)
class NetworkConfig(BaseConfig):
    """MLP hyper-parameters: layer sizes, activation, dropout rate and
    batch-norm usage."""
    units: Sequence[int]
    activation_fn: str
    dropout_prob: float
    use_batch_norm: bool = True

    def to_str(self):
        """Encode the architecture as a short tag, e.g.
        ``net-64-32_dp-0.5_bn-True``."""
        layer_tag = '-'.join(str(int(u)) for u in self.units)
        dropout_tag = f'dp-{self.dropout_prob:2.1f}'
        bn_tag = f'bn-{self.use_batch_norm}'
        return '-'.join(['net', '_'.join([layer_tag, dropout_tag, bn_tag])])
class InferenNet_fast(nn.Module):
    """Wraps a pretrained per-object pose model; the forward pass keeps
    only the first 50 output channels.

    NOTE: ``kernel_size`` is accepted for interface compatibility but is
    not used here.
    """

    def __init__(self, kernel_size, obj_id, dataset):
        super(InferenNet_fast, self).__init__()
        allpaths = ['NULL', 'seq1_model', 'seq2_model', 'NULL', 'seq4_model',
                    'seq5_model', 'seq6_model', 'NULL', 'seq8_model',
                    'seq9_model', 'Semmetry_obj10', 'seq11_model',
                    'seq12_model', 'seq13_model', 'seq14_model', 'seq15_model']
        pose_model = createModel().cuda()
        weight_path = './exp/final_model/' + allpaths[obj_id] + '.pkl'
        print('Loading pose model from {}'.format(weight_path))
        pose_model.load_state_dict(torch.load(weight_path))
        pose_model.eval()
        self.pyranet = pose_model
        self.dataset = dataset

    def forward(self, x):
        # Keep only channels [0, 50) of the backbone output.
        return self.pyranet(x).narrow(1, 0, 50)
def curl(vf: ti.template(), cf: ti.template()):
    # Central-difference curl of the 2-D velocity field `vf`, written into
    # the scalar field `cf`: 0.5 * (d(v_y)/dx - d(v_x)/dy) per cell.
    # NOTE(review): presumably a Taichi kernel (@ti.kernel decorator not
    # visible in this chunk) -- confirm; border cells read neighbours at
    # i±1 / j±1, so out-of-range behaviour relies on Taichi field
    # semantics.
    for (i, j) in vf:
        cf[(i, j)] = (0.5 * ((vf[((i + 1), j)][1] - vf[((i - 1), j)][1]) - (vf[(i, (j + 1))][0] - vf[(i, (j - 1))][0])))
def activation_count_operators(model: nn.Module, inputs: list, **kwargs) -> typing.DefaultDict[(str, float)]:
    """Count activations per operator by delegating to the shared
    operator-counting wrapper in ACTIVATIONS_MODE."""
    return _wrapper_count_operators(
        model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs)
def eval_var(ref, sys):
    """Compute 0.001 * mean squared difference between per-line token
    counts of a reference file and a system-output file.

    Lines are paired positionally (extra lines in the longer file are
    ignored); the value is printed and returned.
    """
    squared_sum = 0
    n_lines = 0
    with open(ref) as fref, open(sys) as fsys:
        for ref_line, sys_line in zip(fref, fsys):
            ref_tokens = len(ref_line.strip().split())
            sys_tokens = len(sys_line.strip().split())
            squared_sum += (ref_tokens - sys_tokens) ** 2
            n_lines += 1
    var = 0.001 * squared_sum / n_lines
    print(f'var: {var}')
    return var
class _Config():
def __init__(self):
self.emb_size = 200
self.col_emb_size = 300
self.hidden_size = 400
self.batch_size = 10
self.epoch = 600
self.dropout = 0.5
self.num_layers = 2
self.learning_rate = 0.0001
self.toy = False
self.train_emb = False
self.history_type = 'full'
self.nogpu = False
self.table_type = 'std'
def _char_init(self):
self.data_root = './data/char/generated_datasets'
self.sep_emb = './embedding/char/separate_emb.txt'
self.comb_emb = './embedding/char/combine_emb.txt'
def _word_init(self):
self.data_root = './data/word/generated_datasets'
self.sep_emb = './embedding/word/separate_emb.txt'
self.comb_emb = './embedding/word/combine_emb.txt' |
def parseArgs():
    """Parse CLI training options and attach the fixed image geometry."""
    options = TrainOptions().parse()
    # Output geometry is fixed by module-level constants, not the CLI.
    options.output_channels = OUTPUT_CHANNELS
    options.img_width = IMG_WIDTH
    options.img_height = IMG_HEIGHT
    return options
def sinc1_dt_rt(t):
    """Elementwise (d/dt sinc(t)) / t with a Taylor fallback near zero.

    With sinc(t) = sin(t)/t, the closed form (cos(t)/t - sin(t)/t^2)/t is
    used where |t| >= 0.01; smaller magnitudes use the series
    -1/3 + t^2/30 - t^4/840 + ... to avoid catastrophic cancellation.
    """
    eps = 0.01
    out = torch.zeros_like(t)
    small = torch.abs(t) < eps
    large = (small == 0)
    tt = t ** 2
    # Horner-nested Taylor expansion around t = 0.
    out[small] = (-1 / 3) * (1 - (tt[small] / 10) * (1 - (tt[small] / 28) * (1 - (tt[small] / 54))))
    out[large] = ((cos(t[large]) / t[large]) - (sin(t[large]) / tt[large])) / t[large]
    return out
def _get_predictor(args: argparse.Namespace, predictors: Dict[(str, str)]) -> Predictor:
    """Load an archived model and build the predictor registered for its type.

    Raises ConfigurationError when the archive's model type has no entry in
    `predictors`.
    """
    archive = load_archive(args.archive_file, cuda_device=args.cuda_device, overrides=args.overrides)
    model_type = archive.config.get('model').get('type')
    if model_type not in predictors:
        raise ConfigurationError('no known predictor for model type {}'.format(model_type))
    return Predictor.from_archive(archive, predictors[model_type])
class RobustService(object):
    """Keeps an external HTTP service (e.g. a CoreNLP server) running.

    Starts the service via `start_cmd`, health-checks it with GET
    `<endpoint>/ping`, restarts it when it dies, and registers an atexit
    hook so the child process is terminated on interpreter exit.
    """

    # Seconds ensure_alive() polls /ping before declaring permanent failure.
    CHECK_ALIVE_TIMEOUT = 120

    def __init__(self, start_cmd, stop_cmd, endpoint, stdout=None, stderr=None, be_quiet=False, host=None, port=None, ignore_binding_error=False):
        # Commands are tokenised once; falsy (None/'') commands stay falsy.
        self.start_cmd = (start_cmd and shlex.split(start_cmd))
        self.stop_cmd = (stop_cmd and shlex.split(stop_cmd))
        self.endpoint = endpoint
        self.stdout = stdout
        self.stderr = stderr
        # subprocess.Popen handle once the service is launched.
        self.server = None
        self.is_active = False
        self.be_quiet = be_quiet
        self.host = host
        self.port = port
        # If True, an already-bound port means "attach to the existing
        # server" instead of raising.
        self.ignore_binding_error = ignore_binding_error
        atexit.register(self.atexit_kill)

    def is_alive(self):
        """Return True if the service answers /ping.

        Raises ShouldRetryException while the connection is still refused
        (the server may not have finished starting yet).
        """
        try:
            # A dead child process is definitively down — unless we attached
            # to an externally managed server (ignore_binding_error).
            if ((not self.ignore_binding_error) and (self.server is not None) and (self.server.poll() is not None)):
                return False
            return requests.get((self.endpoint + '/ping')).ok
        except requests.exceptions.ConnectionError as e:
            raise ShouldRetryException(e)

    def start(self):
        """Launch the service subprocess (no-op when start_cmd is falsy)."""
        if self.start_cmd:
            if (self.host and self.port):
                # Probe the port first for a clearer error than the server's
                # own bind failure would give.
                with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
                    try:
                        sock.bind((self.host, self.port))
                    except socket.error as e:
                        if self.ignore_binding_error:
                            logger.info(f'Connecting to existing CoreNLP server at {self.host}:{self.port}')
                            self.server = None
                            return
                        else:
                            raise PermanentlyFailedException(('Error: unable to start the CoreNLP server on port %d (possibly something is already running there)' % self.port)) from e
            if self.be_quiet:
                # Discard server output; fall back to /dev/null where
                # subprocess.DEVNULL is unavailable (very old Pythons).
                if hasattr(subprocess, 'DEVNULL'):
                    stderr = subprocess.DEVNULL
                else:
                    stderr = open(os.devnull, 'w')
                stdout = stderr
            else:
                stdout = self.stdout
                stderr = self.stderr
            logger.info(f"Starting server with command: {' '.join(self.start_cmd)}")
            try:
                self.server = subprocess.Popen(self.start_cmd, stderr=stderr, stdout=stdout)
            except FileNotFoundError as e:
                raise FileNotFoundError('When trying to run CoreNLP, a FileNotFoundError occurred, which frequently means Java was not installed or was not in the classpath.') from e

    def atexit_kill(self):
        """atexit hook: terminate the child if it is still running."""
        if (self.server and (self.server.poll() is None)):
            self.server.terminate()

    def stop(self):
        """Stop the service: terminate (escalating to kill), then run stop_cmd."""
        if self.server:
            self.server.terminate()
            try:
                self.server.wait(5)
            except subprocess.TimeoutExpired:
                # Graceful terminate timed out; force-kill and wait again.
                self.server.kill()
                try:
                    self.server.wait(5)
                except subprocess.TimeoutExpired:
                    pass
            self.server = None
        if self.stop_cmd:
            subprocess.run(self.stop_cmd, check=True)
        self.is_active = False

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, _, __, ___):
        self.stop()

    def ensure_alive(self):
        """Start the service if needed and block until /ping succeeds.

        Polls once per second; raises PermanentlyFailedException after
        CHECK_ALIVE_TIMEOUT seconds without a successful ping.
        """
        if self.is_active:
            try:
                if self.is_alive():
                    return
                else:
                    # Known-dead service: shut it down before restarting.
                    self.stop()
            except ShouldRetryException:
                pass
        if (self.server is None):
            self.start()
        start_time = time.time()
        while True:
            try:
                if self.is_alive():
                    break
            except ShouldRetryException:
                pass
            if ((time.time() - start_time) < self.CHECK_ALIVE_TIMEOUT):
                time.sleep(1)
            else:
                raise PermanentlyFailedException('Timed out waiting for service to come alive.')
        self.is_active = True
def create_overlap_pair(align1: List[Pair], align2: List[Pair]) -> List[Tuple[(Pair, Pair)]]:
    """All (a, b) pairs from align1 x align2 whose spans overlap.

    Same ordering as itertools.product: align1 varies slowest.
    """
    return [(a, b) for a in align1 for b in align2 if is_overlap(a, b)]
class ShiftNegActivationPostAddTest(ShiftNegActivationTest):
    """Shift-negative-correction test that additionally checks the activation
    bit-width of the post-add layer inserted after the non-linear node."""

    def __init__(self, unit_test, linear_op_to_test, activation_op_to_test, post_add_nbits=7):
        super().__init__(unit_test, linear_op_to_test, activation_op_to_test)
        # Expected activation bit-width for the inserted post-add layer.
        self.post_add_nbits = post_add_nbits

    def get_tpc(self):
        return get_keras_tpc_latest()

    def get_debug_config(self):
        # Force the quantization candidates of nodes in the 'activation'
        # scope to the post-add bit-width via a network-editor rule.
        return mct.core.DebugConfig(network_editor=[EditRule(filter=node_filters.NodeNameScopeFilter('activation'), action=actions.ChangeCandidatesActivationQuantConfigAttr(activation_n_bits=self.post_add_nbits))])

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # Quantization must not change the model's output shape.
        self.unit_test.assertTrue((float_model.output.shape.as_list() == quantized_model.output.shape.as_list()), msg=f'Outputs shape mismatch: {float_model.output.shape} != {quantized_model.output.shape}')
        # Holder [1]: the non-linear node's activation quantizer after SNC.
        non_linear_layer_fake_quant = get_layers_from_model_by_type(quantized_model, KerasActivationQuantizationHolder)[1]
        non_linear_nbits = non_linear_layer_fake_quant.activation_holder_quantizer.get_config()['num_bits']
        self.unit_test.assertTrue((non_linear_nbits == SHIFT_NEGATIVE_NON_LINEAR_NUM_BITS), f"The non-linear node's activation_n_bits after applying snc should be {SHIFT_NEGATIVE_NON_LINEAR_NUM_BITS}, but activation_n_bits is {non_linear_nbits}")
        # Holder [2]: the post-add layer inserted after the non-linear node.
        post_add_layer_fake_quant = get_layers_from_model_by_type(quantized_model, KerasActivationQuantizationHolder)[2]
        post_add_nbits = post_add_layer_fake_quant.activation_holder_quantizer.get_config()['num_bits']
        self.unit_test.assertTrue((post_add_nbits == self.post_add_nbits), f"The post_add layer that's added after the non-linear node should be quantized with {self.post_add_nbits}, but activation_n_bits is {post_add_nbits}")
def split_model_name(model):
    """Split a model file name into (lang, package, processor).

    Drops the 3-character extension, normalises '.' to '_', peels the
    longest matching processor suffix, then splits off the language code.
    Raises AssertionError when no known processor suffix matches.
    """
    base = model[:-3].replace('.', '_')
    # Longest suffixes first so the most specific processor wins.
    for suffix in sorted(ending_to_processor, key=lambda s: -len(s)):
        if base.endswith(suffix):
            base = base[:-(len(suffix) + 1)]
            processor = ending_to_processor[suffix]
            break
    else:
        raise AssertionError(f'Could not find a processor type in {base}')
    lang, package = base.split('_', 1)
    return (lang, package, processor)
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, ppo_config, total_time_steps, validate_every_timesteps, task_name):
    """Train a PPO2 policy on a CausalWorld task across parallel envs.

    Spawns `num_of_envs` subprocess environments (seeded seed_num + rank),
    checkpoints roughly every `validate_every_timesteps` steps into
    `log_relative_path`, and trains for `total_time_steps` steps in total.
    """
    def _make_env(rank):
        # Factory returning a thunk; SubprocVecEnv invokes it inside each
        # worker so the env is constructed in its own process.
        def _init():
            task = generate_task(task_generator_id=task_name, dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02)
            env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=(seed_num + rank), max_episode_length=maximum_episode_length)
            return env
        # NOTE(review): this seeds when the factory is built (parent
        # process), not inside the worker — confirm this is intentional.
        set_global_seeds(seed_num)
        return _init
    policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=[256, 256])
    env = SubprocVecEnv([_make_env(rank=i) for i in range(num_of_envs)])
    # save_freq is per-env, hence the division by num_of_envs.
    checkpoint_callback = CheckpointCallback(save_freq=int((validate_every_timesteps / num_of_envs)), save_path=log_relative_path, name_prefix='model')
    model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, verbose=1, **ppo_config)
    model.learn(total_timesteps=total_time_steps, tb_log_name='ppo2', callback=checkpoint_callback)
    return
def plot_loss_history(Encodings, datasets, params):
    """Plot QCNN-vs-CNN loss curves, one figure per encoding.

    `datasets` selects 'mnist' or 'fashion'; `params` selects the 'large'
    (3-layer CNN / SU4 QCNN) or 'small' (2-layer CNN / SO4 QCNN) history
    tables. Histories are read from module-level lists and subsampled with
    the module-level stride `n`.

    NOTE(review): an unrecognised datasets/params combination leaves the
    history variables unbound and raises NameError at plot time — confirm
    inputs are validated upstream.
    """
    for i in range(len(Encodings)):
        Encoding = Encodings[i]
        if (datasets == 'mnist'):
            if (params == 'large'):
                loss_history_CNN = loss_histories_CNN_MNIST_3L[i][::n]
                loss_history_QCNN = loss_histories_QCNN_MNIST_SU4[i][::n]
            elif (params == 'small'):
                loss_history_CNN = loss_histories_CNN_MNIST_2L[i][::n]
                loss_history_QCNN = loss_histories_QCNN_MNIST_SO4[i][::n]
        elif (datasets == 'fashion'):
            if (params == 'large'):
                loss_history_CNN = loss_histories_CNN_FASHION_3L[i][::n]
                loss_history_QCNN = loss_histories_QCNN_FASHION_SU4[i][::n]
            elif (params == 'small'):
                loss_history_CNN = loss_histories_CNN_FASHION_2L[i][::n]
                loss_history_QCNN = loss_histories_QCNN_FASHION_SO4[i][::n]
        # QCNN is plotted first so it takes the first legend entry.
        plt.plot(loss_history_QCNN)
        plt.plot(loss_history_CNN)
        plt.title(((('QCNN vs CNN with ' + str(Encoding)) + ' for ') + datasets))
        plt.ylabel('loss')
        plt.xlabel('iterations')
        plt.legend(['QCNN', 'CNN'], loc='upper left')
        plt.show()
class I4PoolFunction(Function):
    """Autograd wrapper around the custom C++/CUDA I4 pooling kernels.

    NOTE(review): `forward`/`backward` take `ctx` as their first argument,
    which is the static-style torch.autograd.Function convention; the
    `@staticmethod` decorators appear to have been stripped from this dump
    and are restored here.
    """

    @staticmethod
    def forward(ctx, input, guide):
        """Run the forward pooling kernel and stash tensors for backward."""
        (output, maxout) = _C.I4_pool_forward(input, guide)
        ctx.save_for_backward(input, output, guide, maxout)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Compute gradients w.r.t. `input` and `guide` via the C++ kernel."""
        (input, output, guide, maxout) = ctx.saved_variables
        (grad_input, grad_guide) = _C.I4_pool_backward(input, guide, output, maxout, grad_output)
        return (grad_input, grad_guide)
def get_logits(model, input_ids, max_length, teacher_outputs=None, **kwargs):
    """Generate up to `max_length` tokens and return the per-step scores.

    The scores emitted by `generate` (one tensor per decoding step) are
    stacked along dim 1 — presumably (batch, steps, vocab); confirm against
    the model's generate() contract.
    """
    out = model.generate(input_ids=input_ids, max_length=max_length, fused_ft_kernel=True, teacher_outputs=teacher_outputs, return_dict_in_generate=True, output_scores=True, timing=True, **kwargs)
    return torch.stack(out.scores, dim=1)
def get_args_parser():
    """Build the argument parser for the additional-training options."""
    parser = ArgumentParser(description='additional training specification')
    # Schedule and optimiser settings.
    parser.add_argument('--start_epoch', dest='start_epoch', type=int, default=0)
    parser.add_argument('--additional_epoch', dest='additional_epoch', type=int, default=100)
    parser.add_argument('--lr', dest='lr', type=float, default=0.0001)
    parser.add_argument('--optim', dest='optim', type=str, default='adam', choices=['adam', 'sgd'])
    parser.add_argument('--leaky_relu', dest='leaky_relu', type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument('--ndcg_gain_in_train', dest='ndcg_gain_in_train', type=str, default='exp2', choices=['exp2', 'identity'])
    # Boolean toggles that all share the same str2bool signature.
    for flag in ('--small_dataset', '--debug', '--double_precision', '--standardize'):
        parser.add_argument(flag, type=str2bool, nargs='?', const=True, default=False)
    return parser
def get_engine():
    """Create a SQLAlchemy engine from the configured DB arguments.

    The password can be overridden via the DB_PASS environment variable.

    Bug fix: the connection URL was missing the '@' separator between the
    credentials and the host ('user:pass<host>' -> 'user:pass@host'), which
    produces an invalid SQLAlchemy URL.
    """
    args = get_db_args()
    password = os.getenv('DB_PASS', args['password'])
    # dialect+driver://user:password@host:port/dbname
    connect_str = '{}+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(args['db_type'], args['user'], password, args['host'], args['port'], args['db_name'])
    engine = create_engine(connect_str, encoding='utf-8')
    return engine
def test_amt_annotator_track_no_map(completed_amt_job_metadata):
    """track_tasks should count completed tasks per submitter id."""
    expected = {'6f202e93-e6b6-4e1d-8f07-0484b9a9093a': 20, '2b674d33-f656-44b0-8f90-d70a1ab71ec2': 20, 'afce8c28-969c-4e73-a20f-622ef122f585': 3, '91f6236e-63c6-4a84-8fd6-1efbab6dedab': 16}
    assert paying_annotators.track_tasks(completed_amt_job_metadata) == expected
# NOTE(review): the decorator markers were stripped in this dump (the lines
# began with bare '.parametrize(...)', '(data=st.data())' and
# '(deadline=None, ...)', which is a SyntaxError); they are restored below as
# the standard pytest/hypothesis stack — confirm against the original source.
@pytest.mark.parametrize('mutation, schema', ((negate_constraints, {'type': 'integer', 'minimum': 42}), (negate_constraints, {'minimum': 42}), (change_type, {'type': 'object'}), (change_type, {'type': ['object', 'array']}), (change_type, {'type': ['string', 'integer', 'number', 'object', 'array', 'boolean']}), (remove_required_property, {'properties': {'foo': {}}, 'required': ['foo']}), (remove_required_property, {'properties': {'foo': {}, 'bar': {}}, 'required': ['foo']}), (remove_required_property, {'required': ['foo']}), (change_items, {'type': 'array', 'items': {'type': 'string'}}), (change_items, {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1}), (change_items, {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1, 'maxItems': 1}), (change_items, {'type': 'array', 'items': [{'type': 'string'}]}), (change_items, {'type': 'array', 'items': [{'type': 'string'}], 'minItems': 1}), (change_items, {'type': 'array', 'items': [{'type': 'string'}], 'minItems': 1, 'maxItems': 1}), (change_properties, {'properties': {'foo': {'type': 'integer'}}, 'type': 'object', 'required': ['foo']}), (change_properties, {'properties': {'foo': {'type': 'integer'}}, 'type': ['object']}), (change_properties, {'properties': {'foo': {'type': 'integer'}}, 'type': 'object'}), (change_properties, {'properties': {'foo': {'type': 'integer'}}}), (change_properties, {'properties': {'foo': {'type': 'string', 'minLength': 5}, 'bar': {'type': 'string', 'minLength': 5}}, 'type': 'object', 'required': ['foo', 'bar'], 'additionalProperties': False})))
@given(data=st.data())
@settings(deadline=None, suppress_health_check=SUPPRESSED_HEALTH_CHECKS, max_examples=MAX_EXAMPLES)
def test_successful_mutations(data, mutation, schema):
    """A successful mutation must yield a valid schema that rejects instances
    the original schema accepted."""
    validate_schema(schema)
    validator = Draft4Validator(schema)
    # Mutate a copy so the original validator keeps the pre-mutation schema.
    schema = fast_deepcopy(schema)
    assert (mutation(MutationContext(schema, {}, 'body', 'application/json'), data.draw, schema) == MutationResult.SUCCESS)
    # The mutated schema must still be a valid JSON Schema...
    validate_schema(schema)
    # ...and instances drawn from it must violate the original schema.
    new_instance = data.draw(from_schema(schema))
    assert (not validator.is_valid(new_instance))
def _choose_uniform(s: int, lower: Union[(int, float)], upper: Union[(int, float)], type_: type) -> Union[(int, float)]:
np.random.seed(seed=s)
assert (lower <= upper), '`upper` must be larger than or equal to `lower`'
assert (type_ in [int, float]), f'`type_` must be int or float but {type_} is given'
if (lower == upper):
return lower
if (type_ == int):
return np.random.randint(lower, upper, dtype=type_)
else:
return np.random.uniform(lower, upper) |
def test(args):
    """Run inference over the test split and hand predictions to the interface.

    Loads (optionally cached) preprocessing artifacts, restores the model at
    `args.iteration` from `args.load_dir`, batches the test set, and dumps
    intermediate results every `args.dump_period` batches.
    """
    device = torch.device(('cuda' if args.cuda else 'cpu'))
    pprint(args.__dict__)
    interface = FileInterface(**args.__dict__)
    # Either reuse cached preprocessing output or rebuild it from metadata.
    if args.cache:
        out = interface.cache(preprocess, args)
        processor = out['processor']
        processed_metadata = out['processed_metadata']
    else:
        processor = Processor(**args.__dict__)
        metadata = interface.load_metadata()
        processed_metadata = processor.process_metadata(metadata)
    model = Model(**args.__dict__).to(device)
    model.init(processed_metadata)
    interface.bind(processor, model)
    # Restore the checkpoint for the requested iteration/session.
    interface.load(args.iteration, session=args.load_dir)
    test_examples = interface.load_test()
    test_dataset = tuple((processor.preprocess(example) for example in test_examples))
    test_sampler = Sampler(test_dataset, 'test', **args.__dict__)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, sampler=test_sampler, collate_fn=processor.collate)
    print('Inferencing')
    with torch.no_grad():
        model.eval()
        pred = {}
        # zip with range() caps evaluation at args.eval_steps batches.
        for (batch_idx, (test_batch, _)) in enumerate(zip(test_loader, range(args.eval_steps))):
            test_batch = {key: val.to(device) for (key, val) in test_batch.items()}
            model_output = model(**test_batch)
            results = processor.postprocess_batch(test_dataset, test_batch, model_output)
            # Periodically persist raw dumps for debugging.
            if ((batch_idx % args.dump_period) == 0):
                dump = processor.get_dump(test_dataset, test_batch, model_output, results)
                interface.dump(batch_idx, dump)
            for result in results:
                pred[result['id']] = result['pred']
            print(('[%d/%d]' % ((batch_idx + 1), len(test_loader))))
    interface.pred(pred)
def test_cartesian():
    """Cartesian products and broadcasting over option-type (missing) records."""
    muons = ak.Array([[{'pt': 1.0}], []], with_name='muon')
    electrons = ak.Array([[], [{'pt': 1.0}]], with_name='electron')
    # pt > 5 filters every record out, so firsts() yields missing values.
    muons = muons[muons.pt > 5]
    electrons = electrons[electrons.pt > 5]
    leptons = ak.operations.concatenate([muons, electrons], axis=1)
    candidate = ak.operations.firsts(leptons)
    assert to_list(ak.Array(candidate)) == [None, None]
    # axis=0 product of two length-2 option arrays: 4 all-missing pairs.
    pairs = ak.operations.cartesian([candidate, candidate], axis=0)
    assert to_list(pairs) == [(None, None), (None, None), (None, None), (None, None)]
    # axis=1 product against a regular jagged array keeps missing entries.
    pairs = ak.operations.cartesian([candidate, ak.Array([[1, 2, 3], []])], axis=1)
    assert to_list(pairs) == [None, None]
    one, two = ak.operations.broadcast_arrays(candidate, ak.Array([[1, 2, 3], []]))
    assert to_list(one) == [None, None]
    assert to_list(two) == [None, None]
def load(url):
    """Fetch an image over HTTP and return it as a contiguous uint8 array."""
    response = requests.get(url)
    image = Image.open(BytesIO(response.content))
    return np.ascontiguousarray(image, dtype=np.uint8)
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BART (default / seq2seq-lm / causal-lm).

    NOTE(review): `inputs` and `outputs` are accessed as attributes (see
    `super().outputs` below), so they must be properties; the `@property`
    decorators appear to have been stripped from this dump and are restored
    here — confirm against the original source.
    """

    @property
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic-axis specification of the ONNX graph inputs per task."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                # With cached keys/values the decoder consumes one new token.
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif (self.task == 'causal-lm'):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                (num_encoder_layers, _) = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
        return common_inputs

    @property
    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic-axis specification of the outputs, adding present.* caches."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_outputs = super().outputs
        else:
            # Bypass the seq2seq-specific handling for other tasks.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                (num_encoder_layers, _) = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build encoder+decoder dummy inputs, with zero past_key_values if needed."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
        # With a cache the decoder only sees the single next token.
        decoder_seq_length = (seq_length if (not self.use_past) else 1)
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, encoder_seq_length) = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, (self._config.hidden_size // num_encoder_attention_heads))
            decoder_past_length = (decoder_seq_length + 3)
            decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, (self._config.hidden_size // num_decoder_attention_heads))
            # Extend the decoder mask to cover the fabricated past positions.
            common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            # Encoder and decoder may have different depths; pad the deeper side.
            (num_encoder_layers, num_decoder_layers) = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
            remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
            shape = (encoder_shape if (remaining_side_name == 'encoder') else decoder_shape)
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build decoder-only dummy inputs with zero-filled past_key_values."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, seqlen) = common_inputs['input_ids'].shape
            past_key_values_length = (seqlen + 2)
            (num_encoder_layers, _) = self.num_layers
            (num_encoder_attention_heads, _) = self.num_attention_heads
            past_shape = (batch, num_encoder_attention_heads, past_key_values_length, (self._config.hidden_size // num_encoder_attention_heads))
            common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length)], dim=1)
            common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Tokenize a dummy batch of unk-token strings of the effective size."""
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.DEFAULT_FIXED_BATCH, num_token_to_add=0)
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.DEFAULT_FIXED_SEQUENCE, num_token_to_add=token_to_add)
        dummy_input = ([(' '.join([tokenizer.unk_token]) * seq_length)] * batch_size)
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Dispatch dummy-input generation according to the configured task."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif (self.task == 'causal-lm'):
            common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one past_key_values entry (mutates flattened_output in place;
        intentionally returns None, matching the parent contract)."""
        if (self.task in ['default', 'seq2seq-lm']):
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)
def get_pip_version():
    """Return the human-readable pip version banner."""
    # The package root lives two levels above this module.
    pkg_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
    return 'pip {} from {} (python {})'.format(__version__, pkg_dir, get_major_minor_version())
# NOTE(review): the bare ('is_digit') expression below looks like the
# argument of a stripped registration decorator — confirm against the
# original source; as written it is a harmless no-op statement.
('is_digit')
class IsDigitFactory(SingleFeatureFactory):
    """Single-token feature: '1' when the token's value is all digits."""

    def compute_feature(self, tokens, token_index):
        # None means "no feature emitted" for non-digit tokens.
        return ('1' if tokens[token_index].value.isdigit() else None)
class PongProtocol(Protocol):
    """Protocol that answers every incoming PING with a PONG."""

    def __init__(self, own: Node, name: str, other_name: str, other_node: str):
        super().__init__(own, name)
        # Register with the owning node so it can route messages here.
        own.protocols.append(self)
        self.other_name = other_name
        self.other_node = other_node

    def init(self):
        # No setup required for this protocol.
        pass

    def received_message(self, src: str, message: Message):
        """Validate that the message is a PING and reply with a PONG."""
        assert (message.msg_type == MsgType.PING)
        print('node {} received ping message at time {}'.format(self.own.name, self.own.timeline.now()))
        reply = Message(MsgType.PONG, self.other_name)
        self.own.send_message(self.other_node, reply)
def enableScriptTest():
    """Decorator factory that marks a test instance as script-enabled.

    The returned decorator wraps a test method so that
    `self.is_script_test_enabled` is set to True immediately before the
    wrapped method runs. `functools.wraps` is added so the wrapped test
    keeps its name/docstring for test reporting.
    """
    import functools

    def script_dec(func):
        @functools.wraps(func)
        def wrapper(self):
            self.is_script_test_enabled = True
            return func(self)
        return wrapper
    return script_dec
def linear_ramp(birth, pers, low=0.0, high=1.0, start=0.0, end=1.0):
    """Piecewise-linear ramp weight for persistence values.

    Each persistence value maps to `low` below `start`, to `high` above
    `end`, and linearly interpolates in between. Scalar inputs are promoted
    to length-1 sequences, so the return is always an array.

    Args:
        birth: birth values (only their count is used; kept for API
            compatibility with other weight functions).
        pers: persistence value(s), scalar or sequence.
        low, high: output weights outside the [start, end] window.
        start, end: window over which the ramp rises from low to high.

    Returns:
        np.ndarray of shape (n,), one weight per persistence value.
    """
    try:
        n = len(birth)
    except TypeError:  # was a bare `except:`; only scalar inputs lack len()
        n = 1
        birth = [birth]
        pers = [pers]
    w = np.zeros((n,))
    for i in range(n):
        if (pers[i] < start):
            w[i] = low
        elif (pers[i] > end):
            w[i] = high
        else:
            # Linear interpolation between (start, low) and (end, high).
            w[i] = (((pers[i] - start) * (high - low)) / (end - start)) + low
    return w
def resblock(x_init, channels, use_bias=True, sn=False, scope='resblock'):
    """Residual block: two conv + instance-norm layers with a skip connection."""
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            h = conv(x_init, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, sn=sn)
            h = relu(instance_norm(h))
        with tf.variable_scope('res2'):
            h = conv(h, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, sn=sn)
            h = instance_norm(h)
        # Skip connection from the block input.
        return h + x_init
def load_onnx_graph(fname):
    """Load an ONNX model file and parse its graph."""
    import onnx
    model = onnx.load(fname)
    return parse(model.graph)
def _group_to_str(group: List[str]) -> str:
if (len(group) == 0):
return ''
if (len(group) == 1):
return ('.' + group[0])
return (('.{' + ', '.join(group)) + '}') |
def main():
    """CLI entry point: benchmark net construction for the requested GPU count."""
    arg_parser = argparse.ArgumentParser(description='Caffe2: Benchmark for net construction')
    arg_parser.add_argument('--num_gpus', type=int, default=1, help='Number of GPUs.')
    Create(arg_parser.parse_args())
class Homsets(Category_singleton):
    """The category of homsets (Sage category framework)."""

    def super_categories(self):
        # Every homset is in particular a set.
        from .sets_cat import Sets
        return [Sets()]

    class SubcategoryMethods():

        def Endset(self):
            # Axiom selecting homsets whose domain equals their codomain.
            return self._with_axiom('Endset')

    class Endset(CategoryWithAxiom):
        """Homsets of endomorphisms (domain is codomain)."""

        def extra_super_categories(self):
            # Endomorphism sets are monoids under composition.
            from .monoids import Monoids
            return [Monoids()]

        class ParentMethods():

            def is_endomorphism_set(self):
                # True by construction of the Endset axiom.
                return True

    class ParentMethods():

        def is_endomorphism_set(self):
            """Return whether the domain and codomain are the same object."""
            sD = self.domain()
            sC = self.codomain()
            # Domain/codomain may be weakly referenced and already collected.
            if ((sC is None) or (sD is None)):
                raise RuntimeError('Domain or codomain of this homset have been deallocated')
            return (sD is sC)
def make_index(data_path):
    """Build the dataset index (wav clips + event metadata) and write it as JSON."""
    annotations = {'development': 'metadata_dev', 'evaluation': 'metadata_eval'}
    split_tags = {'development': 'dev', 'evaluation': 'eval'}
    index = {'version': '1.2.0', 'clips': {}, 'metadata': {}}
    for subset in ('development', 'evaluation'):
        tag = split_tags[subset]
        # Index both spatial formats for each subset.
        for formt in ('foa', 'mic'):
            index = _index_wav(index, data_path, formt, tag)
            index = _index_event(index, data_path, formt, annotations[subset], tag)
    with open(DATASET_INDEX_PATH, 'w') as fhandle:
        json.dump(index, fhandle, indent=2)
class TNGraphMtx(object):
    """SWIG-generated Python wrapper for SNAP's TNGraphMtx.

    NOTE: auto-generated binding code — every method delegates to the native
    `_snap` extension; changes belong in the SWIG interface, not here.
    """
    # Ownership flag proxied through the underlying SWIG 'this' pointer.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        _snap.TNGraphMtx_swiginit(self, _snap.new_TNGraphMtx(*args))

    def PGetRows(self):
        # Delegates to the native implementation.
        return _snap.TNGraphMtx_PGetRows(self)

    def PGetCols(self):
        # Delegates to the native implementation.
        return _snap.TNGraphMtx_PGetCols(self)

    def PMultiply(self, *args):
        # Delegates to the native implementation.
        return _snap.TNGraphMtx_PMultiply(self, *args)

    def PMultiplyT(self, *args):
        # Delegates to the native implementation.
        return _snap.TNGraphMtx_PMultiplyT(self, *args)

    # Native destructor hook used by SWIG when the wrapper is collected.
    __swig_destroy__ = _snap.delete_TNGraphMtx
def unescape(text):
    """Undo PTB-style token escaping; the sentinel '-NONE-' maps to None."""
    if text == '-NONE-':
        return None
    # Applied in order; '-``-' must be handled before the shorter '-`-'.
    replacements = (
        ('-``-', '"'),
        ('-`-', "'"),
        ('-SP-', ' '),
        ('-TAB-', '\t'),
        ('-NL-', '\n'),
        ('-NL2-', '\r'),
        ('-LRB-', '('),
        ('-RRB-', ')'),
        ('-BAR-', '|'),
        ('-EMPTY-', ''),
    )
    for escaped, plain in replacements:
        text = text.replace(escaped, plain)
    return text
# NOTE(review): '_module()' below looks like the tail of a stripped
# registration decorator (e.g. a dataset-registry register_module() call) —
# confirm against the original source; as written it calls an undefined name.
_module()
class ISPRSDataset(CustomDataset):
    """ISPRS-style semantic segmentation dataset with six land-cover classes."""

    CLASSES = ('impervious_surface', 'building', 'low_vegetation', 'tree', 'car', 'clutter')
    # One display color per class, in the same order as CLASSES.
    PALETTE = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], [255, 0, 0]]

    def __init__(self, **kwargs):
        # Fixed .png suffixes; reduce_zero_label treats label 0 as ignore.
        super(ISPRSDataset, self).__init__(img_suffix='.png', seg_map_suffix='.png', reduce_zero_label=True, **kwargs)
def test_esrgan():
    """Test the ESRGAN restorer.

    Covers: model construction and attribute wiring, a CPU train step
    (and GPU when available) with a mocked perceptual loss, discriminator
    warm-up (``disc_steps``/``disc_init_steps``), a config without a pixel
    loss, and perceptual-loss returns of (None, style) / (percep, None).
    """

    def _check_train_outputs(outputs, lq, gt, loss_names):
        # Shared assertions on the dict returned by train_step: log_vars
        # contain exactly the expected float losses, sample count is 1,
        # inputs are echoed back unchanged, and the SR output has the
        # 4x-upscaled shape.
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for name in loss_names:
            assert isinstance(outputs['log_vars'][name], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], lq)
        assert torch.equal(outputs['results']['gt'], gt)
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    def _make_optimizers(restorer):
        # Fresh Adam optimizers bound to the given restorer's parameters.
        return {
            'generator': obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=restorer.generator.parameters())),
            'discriminator': obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=restorer.discriminator.parameters())),
        }

    model_cfg = dict(
        type='ESRGAN',
        generator=dict(
            type='MSRResNet', in_channels=3, out_channels=3, mid_channels=4,
            num_blocks=1, upscale_factor=4),
        discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss', gan_type='vanilla', real_label_val=1.0,
            fake_label_val=0, loss_weight=0.005))
    train_cfg = None
    test_cfg = None

    # Build the restorer and check that components were wired up.
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert restorer.__class__.__name__ == 'ESRGAN'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.discriminator, ModifiedVGG)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)

    inputs = torch.rand(1, 3, 32, 32)
    targets = torch.rand(1, 3, 128, 128)
    data_batch = {'lq': inputs, 'gt': targets}
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = _make_optimizers(restorer)

    # CPU train step; perceptual loss is mocked to (percep, style).
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        _check_train_outputs(
            outputs, data_batch['lq'], data_batch['gt'],
            ['loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
             'loss_pix'])

    # GPU train step, when CUDA is available; results are compared on CPU.
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = _make_optimizers(restorer)
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
        with patch.object(
                restorer, 'perceptual_loss',
                return_value=(torch.tensor(1.0).cuda(),
                              torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            _check_train_outputs(
                outputs, data_batch['lq'].cpu(), data_batch['gt'].cpu(),
                ['loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                 'loss_pix'])

    # Back to a CPU batch for the remaining configurations.
    data_batch = {'lq': inputs.cpu(), 'gt': targets.cpu()}

    # Discriminator warm-up: only discriminator losses are logged.
    train_cfg = dict(disc_steps=2, disc_init_steps=2)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        _check_train_outputs(outputs, data_batch['lq'], data_batch['gt'],
                             ['loss_d_real', 'loss_d_fake'])

    # Config without a pixel loss: no loss_pix in the logs.
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
    outputs = restorer.train_step(data_batch, optimizer)
    _check_train_outputs(outputs, data_batch['lq'], data_batch['gt'],
                         ['loss_gan', 'loss_d_real', 'loss_d_fake'])

    # Perceptual loss returns (None, style): only the style loss is logged.
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        _check_train_outputs(
            outputs, data_batch['lq'], data_batch['gt'],
            ['loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
             'loss_pix'])

    # Perceptual loss returns (percep, None): only the perceptual loss
    # is logged.
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        _check_train_outputs(
            outputs, data_batch['lq'], data_batch['gt'],
            ['loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
             'loss_pix'])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.