code stringlengths 101 5.91M |
|---|
class SeekPaginationDef(BaseDef):
    """Pydantic definition of seek (keyset) pagination settings for an endpoint."""

    type: str = Field('seek', const=True)  # constant discriminator value
    max_count: int  # upper bound on items returned per page
    limit_key: str  # request key holding the page-size limit
    seek_id: str  # request key identifying the seek cursor position
    # NOTE(review): the trailing '|' and the bare '_model' below look like
    # extraction artifacts (possibly a stripped validator/decorator) — confirm
    # against the original source.
    seek_key: str |
    _model
def tf_efficientnet_b1_ns(pretrained=False, **kwargs):
    """EfficientNet-B1 (NoisyStudent), ported Tensorflow weights.

    Forces TF-style batch-norm epsilon and 'same' padding so the network
    matches the original TensorFlow implementation numerically.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    return _gen_efficientnet(
        'tf_efficientnet_b1_ns',
        channel_multiplier=1.0,
        depth_multiplier=1.1,
        pretrained=pretrained,
        **kwargs,
    )
def test_get_nan_component_value():
    """Names of NaN-valued entries are joined into a comma-separated string."""
    series = pd.Series([np.nan, 2, np.nan, 4], index=['a', 'b', 'c', 'd'])
    assert get_nan_component_value(series) == 'a, c'
class PadCollator():
    """Collate callable: pads a batch via the tokenizer and moves it to a device."""

    def __init__(self, tokenizer, device, max_segment_len=512):
        self.tokenizer = tokenizer
        self.device = device
        self.max_segment_len = max_segment_len

    def __call__(self, batch):
        padded = self.tokenizer.pad(batch)
        # Convert the padded token fields into device tensors.
        for key in ('input_ids', 'attention_mask'):
            padded[key] = torch.tensor(padded[key], device=self.device)
        if 'gold_clusters' in padded:
            n_clusters = max(padded['num_clusters'])
            cluster_size = max(padded['max_cluster_size'])
            if n_clusters and cluster_size:
                # Pad every example's clusters to a common rectangular shape.
                uniform = [
                    pad_clusters(clusters, n_clusters, cluster_size)
                    for clusters in padded['gold_clusters']
                ]
                padded['gold_clusters'] = torch.tensor(uniform, device=self.device)
            else:
                padded['gold_clusters'] = None
        return padded
def build_horpn_head(cfg, input_shape):
    """Look up the HO-RPN head class named in the config and instantiate it."""
    head_cls = HORPN_HEAD_REGISTRY.get(cfg.MODEL.RPN.HEAD_NAME)
    return head_cls(cfg, input_shape)
class FiniteDimensionalHighestWeightCrystal_TypeE(TensorProductOfCrystals):
    """Finite-dimensional highest weight crystal of type E, realised inside a
    tensor product of column crystals."""

    def __init__(self, dominant_weight):
        # dominant_weight must be a dominant weight (asserted below).
        self._cartan_type = dominant_weight.parent().cartan_type()
        self._highest_weight = dominant_weight
        assert dominant_weight.is_dominant()
        self.rename()
        Parent.__init__(self, category=ClassicalCrystals())
        self.module_generators = [self.module_generator()]

    def _repr_(self):
        return 'Finite dimensional highest weight crystal of type {} and highest weight {}'.format(self._cartan_type, self._highest_weight)

    # Elements are plain tensor-product-of-regular-crystals elements.
    Element = TensorProductOfRegularCrystalsElement

    def module_generator(self):
        """Return the highest weight element generating this crystal."""
        dominant_weight = self._highest_weight
        # One copy of the i-th column crystal per unit of coefficient(i).
        # NOTE(review): self.column_crystal is expected to be provided by a
        # subclass or elsewhere — not visible in this chunk; confirm.
        tensor = sum((([self.column_crystal[i]] * dominant_weight.coefficient(i)) for i in dominant_weight.support()), [])
        return self._element_constructor_(*[B.module_generators[0] for B in tensor])
def _load_llff_image(idx: int, paths: List[str], data_dir: str, out_h: int, out_w: int) -> torch.Tensor:
    """Load the idx-th LLFF image, resize it to (out_h, out_w), and return it
    as an HWC tensor."""
    full_path = os.path.join(data_dir, paths[idx])
    # PIL's resize takes (width, height); LANCZOS matches the original filter.
    resized = Image.open(full_path).convert('RGB').resize((out_w, out_h), Image.LANCZOS)
    chw = pil2tensor(resized)
    # CHW -> HWC layout expected by the caller.
    return chw.permute(1, 2, 0)
class Lark(Serialize):
    """Main interface of the lark parsing library: compiles a grammar and
    builds a lexer/parser from it.

    NOTE(review): several decorators appear to have been stripped from this
    copy — ``load``, ``_load_from_dict``, ``open`` and ``open_from_package``
    look like @classmethods, and the ``source``/``grammar_source`` pairs look
    like @property/@setter accessors; confirm against upstream lark-parser.
    """

    def __init__(self, grammar, **options):
        self.options = LarkOptions(options)
        # Select the regex engine: third-party `regex` only when requested.
        use_regex = self.options.regex
        if use_regex:
            if regex:
                re_module = regex
            else:
                raise ModuleNotFoundError('`regex` module must be installed if calling `Lark(regex=True)`.')
        else:
            re_module = re
        # Derive a source path for error messages (file objects expose .name).
        if (self.options.source_path is None):
            try:
                self.source_path = grammar.name
            except AttributeError:
                self.source_path = '<string>'
        else:
            self.source_path = self.options.source_path
        # Accept file-like grammars by reading them into a string (EAFP).
        try:
            read = grammar.read
        except AttributeError:
            pass
        else:
            grammar = read()
        assert isinstance(grammar, STRING_TYPE)
        self.source_grammar = grammar
        if self.options.use_bytes:
            if (not isascii(grammar)):
                raise ValueError('Grammar must be ascii only, when use_bytes=True')
        cache_fn = None
        if self.options.cache:
            if (self.options.parser != 'lalr'):
                raise NotImplementedError("cache only works with parser='lalr' for now")
            if isinstance(self.options.cache, STRING_TYPE):
                cache_fn = self.options.cache
            else:
                if (self.options.cache is not True):
                    raise ValueError('cache argument must be bool or str')
                # Hash grammar + hashable options + version into a cache filename.
                unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals')
                from . import __version__
                options_str = ''.join(((k + str(v)) for (k, v) in options.items() if (k not in unhashable)))
                s = ((grammar + options_str) + __version__)
                md5 = hashlib.md5(s.encode()).hexdigest()
                cache_fn = (tempfile.gettempdir() + ('/.lark_cache_%s.tmp' % md5))
            if FS.exists(cache_fn):
                logger.debug('Loading grammar from cache: %s', cache_fn)
                # Only whitelisted options may be forwarded to _load.
                for name in (set(options) - _LOAD_ALLOWED_OPTIONS):
                    del options[name]
                with FS.open(cache_fn, 'rb') as f:
                    self._load(f, **options)
                return
        # Resolve the 'auto' lexer choice from the parser algorithm.
        if (self.options.lexer == 'auto'):
            if (self.options.parser == 'lalr'):
                self.options.lexer = 'contextual'
            elif (self.options.parser == 'earley'):
                self.options.lexer = 'dynamic'
            elif (self.options.parser == 'cyk'):
                self.options.lexer = 'standard'
            else:
                assert False, self.options.parser
        lexer = self.options.lexer
        assert ((lexer in ('standard', 'contextual', 'dynamic', 'dynamic_complete')) or issubclass(lexer, Lexer))
        # Ambiguity handling: only earley/cyk support disambiguation.
        if (self.options.ambiguity == 'auto'):
            if (self.options.parser == 'earley'):
                self.options.ambiguity = 'resolve'
        else:
            disambig_parsers = ['earley', 'cyk']
            assert (self.options.parser in disambig_parsers), ('Only %s supports disambiguation right now' % ', '.join(disambig_parsers))
        if (self.options.priority == 'auto'):
            self.options.priority = 'normal'
        if (self.options.priority not in _VALID_PRIORITY_OPTIONS):
            raise ValueError(('invalid priority option: %r. Must be one of %r' % (self.options.priority, _VALID_PRIORITY_OPTIONS)))
        assert (self.options.ambiguity not in ('resolve__antiscore_sum',)), 'resolve__antiscore_sum has been replaced with the option priority="invert"'
        if (self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS):
            raise ValueError(('invalid ambiguity option: %r. Must be one of %r' % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS)))
        # Parse the grammar text and compile terminals/rules/ignored tokens.
        self.grammar = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens)
        if (self.options.postlex is not None):
            terminals_to_keep = set(self.options.postlex.always_accept)
        else:
            terminals_to_keep = set()
        (self.terminals, self.rules, self.ignore_tokens) = self.grammar.compile(self.options.start, terminals_to_keep)
        if self.options.edit_terminals:
            # Allow the caller to mutate terminal definitions in place.
            for t in self.terminals:
                self.options.edit_terminals(t)
        self._terminals_dict = {t.name: t for t in self.terminals}
        # priority='invert' negates rule priorities; priority=None strips them.
        if (self.options.priority == 'invert'):
            for rule in self.rules:
                if (rule.options.priority is not None):
                    rule.options.priority = (- rule.options.priority)
        elif (self.options.priority is None):
            for rule in self.rules:
                if (rule.options.priority is not None):
                    rule.options.priority = None
        lexer_callbacks = (_get_lexer_callbacks(self.options.transformer, self.terminals) if self.options.transformer else {})
        lexer_callbacks.update(self.options.lexer_callbacks)
        self.lexer_conf = LexerConf(self.terminals, re_module, self.ignore_tokens, self.options.postlex, lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes)
        if self.options.parser:
            self.parser = self._build_parser()
        elif lexer:
            self.lexer = self._build_lexer()
        if cache_fn:
            logger.debug('Saving grammar to cache: %s', cache_fn)
            with FS.open(cache_fn, 'wb') as f:
                self.save(f)

    # Append the options documentation to the class docstring (no-op under -OO).
    if __doc__:
        __doc__ += ('\n\n' + LarkOptions.OPTIONS_DOC)
    __serialize_fields__ = ('parser', 'rules', 'options')

    def _build_lexer(self):
        return TraditionalLexer(self.lexer_conf)

    def _prepare_callbacks(self):
        """Build the parser frontend class and the tree-builder callbacks."""
        self.parser_class = get_frontend(self.options.parser, self.options.lexer)
        self._callbacks = None
        # In 'forest' mode the parser returns the raw forest: no tree builder.
        if (self.options.ambiguity != 'forest'):
            self._parse_tree_builder = ParseTreeBuilder(self.rules, (self.options.tree_class or Tree), self.options.propagate_positions, ((self.options.parser != 'lalr') and (self.options.ambiguity == 'explicit')), self.options.maybe_placeholders)
            self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)

    def _build_parser(self):
        self._prepare_callbacks()
        parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
        return self.parser_class(self.lexer_conf, parser_conf, options=self.options)

    def save(self, f):
        """Serialize the instance (rules, parser, options) into file object ``f``."""
        (data, m) = self.memo_serialize([TerminalDef, Rule])
        pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL)

    # NOTE(review): likely a @classmethod upstream.
    def load(cls, f):
        """Load an instance previously saved with ``save``."""
        inst = cls.__new__(cls)
        return inst._load(f)

    def _load(self, f, **kwargs):
        """Rebuild state from a dict or pickled stream produced by ``save``."""
        if isinstance(f, dict):
            d = f
        else:
            d = pickle.load(f)
        memo = d['memo']
        data = d['data']
        assert memo
        memo = SerializeMemoizer.deserialize(memo, {'Rule': Rule, 'TerminalDef': TerminalDef}, {})
        options = dict(data['options'])
        # Reject overrides that are not load-safe.
        if ((set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults)):
            raise ValueError('Some options are not allowed when loading a Parser: {}'.format((set(kwargs) - _LOAD_ALLOWED_OPTIONS)))
        options.update(kwargs)
        self.options = LarkOptions.deserialize(options, memo)
        self.rules = [Rule.deserialize(r, memo) for r in data['rules']]
        self.source_path = '<deserialized>'
        self._prepare_callbacks()
        self.parser = self.parser_class.deserialize(data['parser'], memo, self._callbacks, self.options)
        self.terminals = self.parser.lexer_conf.tokens
        self._terminals_dict = {t.name: t for t in self.terminals}
        return self

    # NOTE(review): likely a @classmethod upstream.
    def _load_from_dict(cls, data, memo, **kwargs):
        inst = cls.__new__(cls)
        return inst._load({'data': data, 'memo': memo}, **kwargs)

    # NOTE(review): likely a @classmethod upstream.
    def open(cls, grammar_filename, rel_to=None, **options):
        """Create an instance from a grammar file, optionally relative to ``rel_to``."""
        if rel_to:
            basepath = os.path.dirname(rel_to)
            grammar_filename = os.path.join(basepath, grammar_filename)
        with open(grammar_filename, encoding='utf8') as f:
            return cls(f, **options)

    # NOTE(review): likely a @classmethod upstream.
    def open_from_package(cls, package, grammar_path, search_paths=('',), **options):
        """Create an instance from a grammar file bundled inside a package."""
        package = FromPackageLoader(package, search_paths)
        (full_path, text) = package(None, grammar_path)
        options.setdefault('source_path', full_path)
        options.setdefault('import_paths', [])
        options['import_paths'].append(package)
        return cls(text, **options)

    def __repr__(self):
        return ('Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer))

    def lex(self, text):
        """Run only the lexer over ``text`` (builds a standard lexer on demand)."""
        if (not hasattr(self, 'lexer')):
            self.lexer = self._build_lexer()
        stream = self.lexer.lex(text)
        if self.options.postlex:
            return self.options.postlex.process(stream)
        return stream

    def get_terminal(self, name):
        """Return the TerminalDef registered under ``name``."""
        return self._terminals_dict[name]

    def parse(self, text, start=None, on_error=None):
        """Parse ``text``; with ``on_error`` given, retry via puppet-based recovery."""
        try:
            return self.parser.parse(text, start=start)
        except UnexpectedInput as e:
            if (on_error is None):
                raise
            while True:
                if isinstance(e, UnexpectedCharacters):
                    # Remember lexer position to detect a stuck handler below.
                    s = e.puppet.lexer_state.state
                    p = s.line_ctr.char_pos
                if (not on_error(e)):
                    raise e
                if isinstance(e, UnexpectedCharacters):
                    # Handler did not advance: skip one character to avoid looping.
                    if (p == s.line_ctr.char_pos):
                        s.line_ctr.feed(s.text[p:(p + 1)])
                try:
                    return e.puppet.resume_parse()
                except UnexpectedToken as e2:
                    # Same $END failure on the same puppet twice: give up.
                    if (isinstance(e, UnexpectedToken) and (e.token.type == e2.token.type == '$END') and (e.puppet == e2.puppet)):
                        raise e2
                    e = e2
                except UnexpectedCharacters as e2:
                    e = e2

    # NOTE(review): likely a deprecated @property alias upstream.
    def source(self):
        warn('Lark.source attribute has been renamed to Lark.source_path', DeprecationWarning)
        return self.source_path

    # NOTE(review): likely @source.setter upstream.
    def source(self, value):
        self.source_path = value

    # NOTE(review): likely a deprecated @property alias upstream.
    def grammar_source(self):
        warn('Lark.grammar_source attribute has been renamed to Lark.source_grammar', DeprecationWarning)
        return self.source_grammar

    # NOTE(review): the bare expression below looks like a mangled
    # '@grammar_source.setter' decorator — confirm against upstream.
    _source.setter
    def grammar_source(self, value):
        self.source_grammar = value
def compress_for_output_listing(paths):
    """Return (will_remove, will_skip) for a human-readable removal listing.

    Package directories (detected via __init__.py or .dist-info entries) are
    collapsed to 'folder/*'; files found on disk inside those folders that are
    not slated for removal are reported separately as skipped.
    """
    removal_set = set()
    package_dirs = set()
    skipped = set()
    for entry in list(paths):
        if entry.endswith('.pyc'):
            continue
        if entry.endswith('__init__.py') or '.dist-info' in entry:
            package_dirs.add(os.path.dirname(entry))
        removal_set.add(entry)
    package_dirs = compact(package_dirs)
    # Anything on disk under a package dir but not being removed is "skipped".
    for pkg_dir in package_dirs:
        for dirpath, _, filenames in os.walk(pkg_dir):
            for filename in filenames:
                if filename.endswith('.pyc'):
                    continue
                candidate = os.path.normcase(os.path.join(dirpath, filename))
                if os.path.isfile(candidate) and candidate not in removal_set:
                    skipped.add(candidate)
    collapsed = removal_set | {os.path.join(d, '*') for d in package_dirs}
    return (collapsed, skipped)
class Registry():
    """String-to-class registry with hierarchical scopes (mmcv-style).

    NOTE(review): several decorators appear stripped from this copy —
    ``infer_scope``/``split_scope_key`` look like @staticmethods, and
    ``name``/``scope``/``module_dict``/``children`` look like @property
    accessors (other methods use them without calling them); confirm.
    """

    def __init__(self, name, build_func=None, parent=None, scope=None):
        self._name = name
        self._module_dict = dict()
        self._children = dict()
        # Scope defaults to the top-level package of the caller's module.
        self._scope = (self.infer_scope() if (scope is None) else scope)
        # build_func resolution order: explicit arg > parent's > build_from_cfg.
        if (build_func is None):
            if (parent is not None):
                self.build_func = parent.build_func
            else:
                self.build_func = build_from_cfg
        else:
            self.build_func = build_func
        if (parent is not None):
            assert isinstance(parent, Registry)
            parent._add_children(self)
            self.parent = parent
        else:
            self.parent = None

    def __len__(self):
        return len(self._module_dict)

    def __contains__(self, key):
        return (self.get(key) is not None)

    def __repr__(self):
        format_str = (self.__class__.__name__ + f'(name={self._name}, items={self._module_dict})')
        return format_str

    def infer_scope():
        """Infer the scope (top-level package name) of the calling module."""
        # stack()[2] is the frame of __init__'s caller.
        filename = inspect.getmodule(inspect.stack()[2][0]).__name__
        split_filename = filename.split('.')
        return split_filename[0]

    def split_scope_key(key):
        """Split 'scope.key' into (scope, key); scope is None when absent."""
        split_index = key.find('.')
        if (split_index != (- 1)):
            return (key[:split_index], key[(split_index + 1):])
        else:
            return (None, key)

    def name(self):
        return self._name

    def scope(self):
        return self._scope

    def module_dict(self):
        return self._module_dict

    def children(self):
        return self._children

    def get(self, key):
        """Look up ``key`` locally, in a child scope, or via the root registry."""
        (scope, real_key) = self.split_scope_key(key)
        if ((scope is None) or (scope == self._scope)):
            # Local lookup (no scope prefix, or our own scope).
            if (real_key in self._module_dict):
                return self._module_dict[real_key]
        elif (scope in self._children):
            return self._children[scope].get(real_key)
        else:
            # Unknown scope: delegate the full key to the root registry.
            parent = self.parent
            while (parent.parent is not None):
                parent = parent.parent
            return parent.get(key)

    def build(self, *args, **kwargs):
        return self.build_func(*args, **kwargs, registry=self)

    def _add_children(self, registry):
        """Attach a child registry under its (unique, non-None) scope."""
        assert isinstance(registry, Registry)
        assert (registry.scope is not None)
        assert (registry.scope not in self.children), f'scope {registry.scope} exists in {self.name} registry'
        self.children[registry.scope] = registry

    def _register_module(self, module_class, module_name=None, force=False):
        """Register a class under one or more names; force overwrites."""
        if (not inspect.isclass(module_class)):
            raise TypeError(f'module must be a class, but got {type(module_class)}')
        if (module_name is None):
            module_name = module_class.__name__
        if isinstance(module_name, str):
            module_name = [module_name]
        for name in module_name:
            if ((not force) and (name in self._module_dict)):
                raise KeyError(f'{name} is already registered in {self.name}')
            self._module_dict[name] = module_class

    def deprecated_register_module(self, cls=None, force=False):
        """Back-compat shim for the old register_module(module, force) API."""
        warnings.warn('The old API of register_module(module, force=False) is deprecated and will be removed, please use the new API register_module(name=None, force=False, module=None) instead.')
        if (cls is None):
            return partial(self.deprecated_register_module, force=force)
        self._register_module(cls, force=force)
        return cls

    def register_module(self, name=None, force=False, module=None):
        """Register a class directly (module=...) or act as a decorator."""
        if (not isinstance(force, bool)):
            raise TypeError(f'force must be a boolean, but got {type(force)}')
        # Old-style positional usage: register_module(SomeClass).
        if isinstance(name, type):
            return self.deprecated_register_module(name, force=force)
        if (not ((name is None) or isinstance(name, str) or misc.is_seq_of(name, str))):
            raise TypeError(f'name must be either of None, an instance of str or a sequence of str, but got {type(name)}')
        if (module is not None):
            self._register_module(module_class=module, module_name=name, force=force)
            return module

        # Decorator usage: @registry.register_module(name=...).
        def _register(cls):
            self._register_module(module_class=cls, module_name=name, force=force)
            return cls
        return _register
def toTensor(G_times, shape=(100, 100)):
    """Convert a sequence of networkx graphs into a (T, *shape) tensor.

    Each graph's adjacency matrix is resized with np.resize (truncated or
    tiled) to a fixed shape and cast to float before stacking.

    :param G_times: iterable of networkx graphs, one per time step
    :param shape: target 2-D shape for every adjacency slice (default
        ``(100, 100)``, preserving the original behavior)
    :return: a tensorly tensor of shape (len(G_times), *shape)
    """
    slices = []
    for G in G_times:
        A = nx.to_numpy_matrix(G)
        A = np.resize(A, shape)
        A = np.asarray(A)
        # Bug fix: ndarray.astype returns a NEW array; the original called
        # `A.astype(float)` and discarded the result, so the cast was a no-op.
        A = A.astype(float)
        slices.append(A)
    return tl.tensor(slices)
def test_build_ket():
    """KetState construction: valid amplitude vectors succeed, invalid raise."""
    keys = [0]
    # Valid single-qubit states: |0>, |+>, i|1>.
    for amplitudes in ([complex(1), complex(0)],
                       [complex(sqrt(1 / 2)), complex(sqrt(1 / 2))],
                       [complex(0), complex(1j)]):
        _ = KetState(amplitudes, keys)
    # Valid two-qubit state |00>.
    _ = KetState([complex(1), complex(0), complex(0), complex(0)], [0, 1])
    # Single amplitude with magnitude above 1.
    with pytest.raises(AssertionError, match='Illegal value with abs > 1 in ket vector'):
        _ = KetState([complex(3 / 2), complex(0)], keys)
    # Zero vector: squared amplitudes do not normalise.
    with pytest.raises(AssertionError, match='Squared amplitudes do not sum to 1'):
        _ = KetState([complex(0), complex(0)], keys)
    # Amplitude count inconsistent with the single key.
    with pytest.raises(AssertionError):
        _ = KetState([complex(1), complex(0), complex(0)], keys)
    with pytest.raises(AssertionError):
        _ = KetState([complex(1), complex(0), complex(0), complex(0)], keys)
def visualize_sr(img, halve=False):
    """Show a labelled 2x2 grid comparing bicubic, SRResNet, SRGAN and the
    original HR image, and return the composed grid.

    :param img: path to a high-resolution image file
    :param halve: if True, halve the HR image first (for very large images)
    :return: the composed PIL grid image

    NOTE(review): relies on module-level ``srresnet``, ``srgan_generator``,
    ``device`` and ``convert_image`` defined elsewhere in the file.
    """
    hr_img = Image.open(img, mode='r')
    hr_img = hr_img.convert('RGB')
    if halve:
        hr_img = hr_img.resize((int((hr_img.width / 2)), int((hr_img.height / 2))), Image.LANCZOS)
    # Downsample by 4x to create the low-resolution input.
    lr_img = hr_img.resize((int((hr_img.width / 4)), int((hr_img.height / 4))), Image.BICUBIC)
    # Naive bicubic upsampling back to HR size, for comparison.
    bicubic_img = lr_img.resize((hr_img.width, hr_img.height), Image.BICUBIC)
    # Super-resolve with SRResNet ([-1, 1] output converted back to PIL).
    sr_img_srresnet = srresnet(convert_image(lr_img, source='pil', target='imagenet-norm').unsqueeze(0).to(device))
    sr_img_srresnet = sr_img_srresnet.squeeze(0).cpu().detach()
    sr_img_srresnet = convert_image(sr_img_srresnet, source='[-1, 1]', target='pil')
    # Super-resolve with the SRGAN generator.
    sr_img_srgan = srgan_generator(convert_image(lr_img, source='pil', target='imagenet-norm').unsqueeze(0).to(device))
    sr_img_srgan = sr_img_srgan.squeeze(0).cpu().detach()
    sr_img_srgan = convert_image(sr_img_srgan, source='[-1, 1]', target='pil')
    # Compose the four images into a labelled 2x2 grid with white margins.
    margin = 40
    grid_img = Image.new('RGB', (((2 * hr_img.width) + (3 * margin)), ((2 * hr_img.height) + (3 * margin))), (255, 255, 255))
    draw = ImageDraw.Draw(grid_img)
    try:
        font = ImageFont.truetype('calibril.ttf', size=23)
    except OSError:
        print('Defaulting to a terrible font. To use a font of your choice, include the link to its TTF file in the function.')
        font = ImageFont.load_default()
    # Top-left: bicubic upsampling, with a centred caption above it.
    grid_img.paste(bicubic_img, (margin, margin))
    text_size = font.getsize('Bicubic')
    draw.text(xy=[((margin + (bicubic_img.width / 2)) - (text_size[0] / 2)), ((margin - text_size[1]) - 5)], text='Bicubic', font=font, fill='black')
    # Top-right: SRResNet output.
    grid_img.paste(sr_img_srresnet, (((2 * margin) + bicubic_img.width), margin))
    text_size = font.getsize('SRResNet')
    draw.text(xy=[((((2 * margin) + bicubic_img.width) + (sr_img_srresnet.width / 2)) - (text_size[0] / 2)), ((margin - text_size[1]) - 5)], text='SRResNet', font=font, fill='black')
    # Bottom-left: SRGAN output.
    grid_img.paste(sr_img_srgan, (margin, ((2 * margin) + sr_img_srresnet.height)))
    text_size = font.getsize('SRGAN')
    draw.text(xy=[((margin + (bicubic_img.width / 2)) - (text_size[0] / 2)), ((((2 * margin) + sr_img_srresnet.height) - text_size[1]) - 5)], text='SRGAN', font=font, fill='black')
    # Bottom-right: the original HR image.
    grid_img.paste(hr_img, (((2 * margin) + bicubic_img.width), ((2 * margin) + sr_img_srresnet.height)))
    text_size = font.getsize('Original HR')
    draw.text(xy=[((((2 * margin) + bicubic_img.width) + (sr_img_srresnet.width / 2)) - (text_size[0] / 2)), ((((2 * margin) + sr_img_srresnet.height) - text_size[1]) - 1)], text='Original HR', font=font, fill='black')
    grid_img.show()
    return grid_img
def query_2_kde_sql(query: Query, table: Table):
    """Render a Query's predicates as a SQL SELECT statement over ``table``.

    Categorical columns are discretized to their integer codes first; range
    predicates ('[]') become a pair of >= / <= clauses.
    """
    clauses = []
    for col, pred in query.predicates.items():
        if pred is None:
            continue
        op, val = pred
        if is_categorical(table.data[col].dtype):
            # Categorical columns only support equality on a single value.
            assert op == '=' and not isinstance(val, tuple), val
            val = table.columns[col].discretize(val).item()
        if op == '[]':
            # Closed range predicate: lower and upper bound clauses.
            clauses.append(f'{col} >= {val[0]}')
            clauses.append(f'{col} <= {val[1]}')
        else:
            clauses.append(f'{col} {op} {val}')
    return f"""SELECT * FROM "{table.name}" WHERE {' AND '.join(clauses)}"""
def test_chain_movement_1(env_two_agents):
    """Two agents queued in a row both advance one cell when stepping forward."""
    env = env_two_agents
    # Place both agents on the same row, facing right, one behind the other.
    for agent, start_x in zip(env.agents, (3, 4)):
        agent.x = start_x
        agent.y = 25
        agent.dir = Direction.RIGHT
    env._recalc_grid()
    env.step([Action.FORWARD, Action.FORWARD])
    assert (env.agents[0].x, env.agents[0].y) == (4, 25)
    assert (env.agents[1].x, env.agents[1].y) == (5, 25)
class NoTransformation(TransformationBase):
    """Identity transformation: parses code and re-emits its tokens unchanged."""

    def __init__(self, parser_path: str, language: str) -> object:
        super().__init__(parser_path, language)
        if not os.path.exists(parser_path):
            raise ValueError(f'Language parser does not exist at {parser_path}. Please run `setup.sh` to properly set the environment!')
        self.lang_object = Language(parser_path, language)
        self.parser = Parser()
        self.parser.set_language(self.lang_object)
        # Most languages share the generic node-type tokenizer; a few have
        # dedicated processors.
        generic = self.get_tokens_with_node_type
        processor_map = {
            'java': generic,
            'c': generic,
            'cpp': generic,
            'c_sharp': generic,
            'javascript': JavascriptProcessor.get_tokens,
            'python': PythonProcessor.get_tokens,
            'php': PhpProcessor.get_tokens,
            'ruby': generic,
            'go': generic,
        }
        self.processor = processor_map[language]

    def transform_code(self, code: Union[str, bytes]) -> Tuple[str, object]:
        """Tokenize ``code`` and return (whitespace-normalised text, metadata)."""
        root = self.parse_code(code=code)
        result = self.processor(code=code.encode(), root=root)
        if isinstance(result, tuple):
            tokens, types = result
        else:
            tokens, types = result, None
        flattened = re.sub('[ \t\n]+', ' ', ' '.join(tokens))
        return (flattened, {'types': types, 'success': False})
def batched_boarders_and_data(data_min_size=5, data_max_size=10,
                              examples_min_number=1, examples_max_number=4,
                              example_min_size=1, example_max_size=3,
                              dtype=np.float32, elements=None):
    """Hypothesis strategy producing (boarders, data) pairs for batched ops.

    Draws (data_size, num_examples, example_size) first, then builds an int32
    index array of shape [num_examples, example_size, 2] with values bounded
    by data_size, alongside a 1-D data array of length data_size.
    """
    dims_strategy = st.tuples(
        st.integers(min_value=data_min_size, max_value=data_max_size),
        st.integers(min_value=examples_min_number, max_value=examples_max_number),
        st.integers(min_value=example_min_size, max_value=example_max_size),
    )

    def build(dims):
        data_size, num_examples, example_size = dims
        boarders = hu.arrays(
            [num_examples, example_size, 2],
            dtype=np.int32,
            elements=st.integers(min_value=0, max_value=data_size),
        )
        data = hu.arrays([data_size], dtype, elements)
        return st.tuples(boarders, data)

    return dims_strategy.flatmap(build)
class BenchMatrixPower(Benchmark):
    """Benchmark sparse matrix exponentiation across exponents and densities."""

    params = [[0, 1, 2, 3, 8, 9], [1000], [1e-06, 0.001]]
    param_names = ['x', 'N', 'density']

    def setup(self, x: int, N: int, density: float):
        # Fresh random N x N CSR matrix for every parameter combination.
        self.A = random(N, N, density=density, format='csr')

    def time_matrix_power(self, x: int, N: int, density: float):
        # Only the computation is timed; the result is discarded.
        self.A ** x
class MockClassifier(MLClassifierBase):
    """Trivial multi-label classifier that predicts every label for every sample."""

    def __init__(self):
        super(MockClassifier, self).__init__()

    def fit(self, X, y):
        # Only the label count is remembered from training.
        self.label_count = y.shape[1]
        return self

    def predict(self, X):
        ones = np.ones(shape=(X.shape[0], self.label_count), dtype=int)
        return csr_matrix(ones)
def forward_state(app):
    """Register the disabled-state callback for the 'forward' button.

    NOTE(review): the bare tuple below looks like a stripped
    ``@app.callback(...)`` decorator for ``callback`` — confirm against the
    original source.
    """
    (Output('forward', 'disabled'), Input('forward', 'n_clicks'), Input('forward-N', 'children'))
    def callback(click, done):
        # Identify which input triggered this invocation.
        ctx = dash.callback_context
        button_id = [x['prop_id'].split('.')[0] for x in ctx.triggered]
        if ('forward-N' in button_id):
            # Work finished (forward-N updated): re-enable the button.
            return False
        if ('forward' in button_id):
            # Button clicked: disable it until forward-N updates.
            return True
def unpickle_power_series_ring_v0(base_ring, name, default_prec, sparse):
    """Reconstruct a PowerSeriesRing from arguments stored by a v0 pickle."""
    return PowerSeriesRing(
        base_ring, name=name, default_prec=default_prec, sparse=sparse
    )
class VariableSignature(Signature):
    """Signature for a variable: an id, a return type and an optional name."""

    def __init__(self, id_, return_type, name=None):
        # Variables take no arguments, hence arity 0.
        super(VariableSignature, self).__init__(id_, return_type, 0, name=name)

    def __repr__(self):
        return ('$%s:%s' % (self.name, self.return_type))

    def simple_repr(self):
        return self.name

    def is_ref(self):
        if isinstance(self.name, str):
            # NOTE(review): str.startswith('') is always True, so this returns
            # True for every string name — the intended reference prefix looks
            # like it was lost in this copy; confirm against the original.
            return self.name.startswith('')
        return False
def generate_induce_artifacts(jpeg_quality_range, scale_factor_range):
    """Build a transform that degrades images via downscale + JPEG round-trip.

    :param jpeg_quality_range: (lo, hi) JPEG quality, each in [1, 100]
    :param scale_factor_range: (lo, hi) downscale factor, each in (0, 1];
        sampled log-uniformly
    :return: a torchvision Lambda transform applying random artifacts
    """
    assert len(jpeg_quality_range) == 2
    assert all(1 <= q <= 100 for q in jpeg_quality_range)
    assert jpeg_quality_range[0] <= jpeg_quality_range[1]
    assert len(scale_factor_range) == 2
    assert all(0 < s <= 1 for s in scale_factor_range)
    assert scale_factor_range[0] <= scale_factor_range[1]
    log_bounds = (np.log(scale_factor_range[0]), np.log(scale_factor_range[1]))

    def induce_artifacts(img):
        # Log-uniform scale, then a uniformly random JPEG quality.
        scale = np.exp(np.random.uniform(log_bounds[0], log_bounds[1]))
        quality = int(np.random.uniform(jpeg_quality_range[0], jpeg_quality_range[1]))
        shrunk = img.resize((int(img.size[0] * scale), int(img.size[1] * scale)))
        # Round-trip through an in-memory JPEG to introduce compression noise.
        buffer = io.BytesIO()
        shrunk.save(buffer, format='JPEG', quality=quality)
        degraded = Image.open(buffer)
        return degraded.resize(img.size)

    return transforms.Lambda(induce_artifacts)
class FileWriter():
    """Experiment logger that writes messages, CSV rows and metadata to disk.

    Creates ``<rootdir>/<xpid>/`` containing out.log (messages), logs.csv
    (one row per ``log`` call), fields.csv (current column names) and
    meta.json (experiment metadata).
    """

    def __init__(self, xpid: str=None, xp_args: dict=None, rootdir: str='~/palaas'):
        if (not xpid):
            # Default experiment id: '<pid>_<unix time>'.
            xpid = '{proc}_{unixtime}'.format(proc=os.getpid(), unixtime=int(time.time()))
        self.xpid = xpid
        self._tick = 0
        if (xp_args is None):
            xp_args = {}
        self.metadata = gather_metadata()
        # Deep-copy args so later caller mutations don't leak into metadata.
        self.metadata['args'] = copy.deepcopy(xp_args)
        self.metadata['xpid'] = self.xpid
        formatter = logging.Formatter('%(message)s')
        self._logger = logging.getLogger('palaas/out')
        # Console handler.
        shandle = logging.StreamHandler()
        shandle.setFormatter(formatter)
        self._logger.addHandler(shandle)
        self._logger.setLevel(logging.INFO)
        rootdir = os.path.expandvars(os.path.expanduser(rootdir))
        self.basepath = os.path.join(rootdir, self.xpid)
        if (not os.path.exists(self.basepath)):
            self._logger.info('Creating log directory: %s', self.basepath)
            os.makedirs(self.basepath, exist_ok=True)
        else:
            self._logger.info('Found log directory: %s', self.basepath)
        self.paths = dict(msg='{base}/out.log'.format(base=self.basepath), logs='{base}/logs.csv'.format(base=self.basepath), fields='{base}/fields.csv'.format(base=self.basepath), meta='{base}/meta.json'.format(base=self.basepath))
        self._logger.info('Saving arguments to %s', self.paths['meta'])
        if os.path.exists(self.paths['meta']):
            self._logger.warning('Path to meta file already exists. Not overriding meta.')
        else:
            self._save_metadata()
        self._logger.info('Saving messages to %s', self.paths['msg'])
        if os.path.exists(self.paths['msg']):
            self._logger.warning('Path to message file already exists. New data will be appended.')
        # File handler mirrors console messages into out.log.
        fhandle = logging.FileHandler(self.paths['msg'])
        fhandle.setFormatter(formatter)
        self._logger.addHandler(fhandle)
        self._logger.info('Saving logs data to %s', self.paths['logs'])
        self._logger.info("Saving logs' fields to %s", self.paths['fields'])
        if os.path.exists(self.paths['logs']):
            self._logger.warning('Path to log file already exists. Old data will be deleted.')
            os.remove(self.paths['logs'])
            # Resume the previously recorded field names from fields.csv.
            with open(self.paths['fields'], 'r') as csvfile:
                reader = csv.reader(csvfile)
                self.fieldnames = list(reader)[0]
        else:
            self.fieldnames = ['_tick', '_time']

    def log(self, to_log: Dict, tick: int=None, verbose: bool=False) -> None:
        """Append one row of values to logs.csv, extending columns as needed.

        ``tick`` override is not implemented; an internal counter is used.
        """
        if (tick is not None):
            raise NotImplementedError
        else:
            to_log['_tick'] = self._tick
            self._tick += 1
        to_log['_time'] = time.time()
        # Grow the field list with any new keys seen in this row.
        old_len = len(self.fieldnames)
        for k in to_log:
            if (k not in self.fieldnames):
                self.fieldnames.append(k)
        if (old_len != len(self.fieldnames)):
            # New fields appeared: rewrite fields.csv with the full list.
            with open(self.paths['fields'], 'w') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(self.fieldnames)
            self._logger.info('Updated log fields: %s', self.fieldnames)
        if (to_log['_tick'] == 0):
            # First row ever: write a commented header line to logs.csv.
            with open(self.paths['logs'], 'a') as f:
                f.write(('# %s\n' % ','.join(self.fieldnames)))
        if verbose:
            self._logger.info('LOG | %s', ', '.join(['{}: {}'.format(k, to_log[k]) for k in sorted(to_log)]))
        with open(self.paths['logs'], 'a') as f:
            writer = csv.DictWriter(f, fieldnames=self.fieldnames)
            writer.writerow(to_log)

    def close(self, successful: bool=True) -> None:
        """Record the end time and success flag, then persist metadata."""
        self.metadata['date_end'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        self.metadata['successful'] = successful
        self._save_metadata()

    def _save_metadata(self) -> None:
        # Pretty-printed, key-sorted JSON for stable diffs.
        with open(self.paths['meta'], 'w') as jsonfile:
            json.dump(self.metadata, jsonfile, indent=4, sort_keys=True)
class Model_combination(nn.Module):
    """Encoder-decoder wrapper: spectrogram -> latent -> eight MIDI outputs."""

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder_spec2midi = encoder
        self.decoder_spec2midi = decoder

    def forward(self, input_spec):
        latent = self.encoder_spec2midi(input_spec)
        # Decoder emits onset/offset/mpe/velocity for both the A and B heads.
        (output_onset_A, output_offset_A, output_mpe_A, output_velocity_A,
         output_onset_B, output_offset_B, output_mpe_B, output_velocity_B) = self.decoder_spec2midi(latent)
        return (output_onset_A, output_offset_A, output_mpe_A, output_velocity_A,
                output_onset_B, output_offset_B, output_mpe_B, output_velocity_B)
def make_sdfg(transB: bool, alpha: float, beta: float, implementation: str, dtype) -> dace.SDFG:
    """Build, expand and validate a DaCe SDFG computing a CSRMM
    (C = alpha * A @ B + beta * C with A in CSR form).

    NOTE(review): N, K, M and NNZ are symbols/constants defined elsewhere in
    the file — not visible in this chunk.
    """
    sdfg = dace.SDFG(name='CSRMM')
    # Sparse matrix A in CSR format: values, row offsets, column indices.
    sdfg.add_array('A_val', shape=(NNZ,), dtype=dtype, transient=False)
    sdfg.add_array('A_row', shape=((N + 1),), dtype=dace.int32, transient=False)
    sdfg.add_array('A_col', shape=(NNZ,), dtype=dace.int32, transient=False)
    sdfg.add_array('C', shape=(N, K), dtype=dtype, transient=False)
    # B is stored transposed when transB is set.
    if transB:
        sdfg.add_array('B', shape=(K, M), dtype=dtype, transient=False)
    else:
        sdfg.add_array('B', shape=(M, K), dtype=dtype, transient=False)
    state = sdfg.add_state('state', is_start_state=True)
    a_row_node = state.add_access('A_row')
    a_col_node = state.add_access('A_col')
    a_val_node = state.add_access('A_val')
    B_node = state.add_access('B')
    C_node = state.add_access('C')
    library_node = CSRMM('csrmm', transB=transB, alpha=alpha, beta=beta)
    library_node.implementation = implementation
    state.add_node(library_node)
    # Wire the CSRMM library node's inputs and output.
    state.add_edge(a_val_node, None, library_node, '_a_vals', dace.Memlet.from_array('A_val', sdfg.arrays['A_val']))
    state.add_edge(a_row_node, None, library_node, '_a_rows', dace.Memlet.from_array('A_row', sdfg.arrays['A_row']))
    state.add_edge(a_col_node, None, library_node, '_a_cols', dace.Memlet.from_array('A_col', sdfg.arrays['A_col']))
    state.add_edge(B_node, None, library_node, '_b', dace.Memlet.from_array('B', sdfg.arrays['B']))
    state.add_edge(library_node, '_c', C_node, None, dace.Memlet.from_array('C', sdfg.arrays['C']))
    if (beta != 0):
        # Nonzero beta: C is also read as an input (accumulation term).
        cin_node = state.add_access('C')
        state.add_edge(cin_node, None, library_node, '_cin', dace.Memlet.from_array('C', sdfg.arrays['C']))
    sdfg.expand_library_nodes()
    sdfg.validate()
    return sdfg
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token='[CLS]', cls_token_segment_id=1, sep_token='[SEP]', sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, sequence_a_segment_id=0, sequence_b_segment_id=1, mask_padding_with_zero=True):
    """Convert text examples into padded InputFeatures for a BERT-style model.

    Tokenizes text_a (and optional text_b), truncates to max_seq_length
    including special tokens, inserts CLS/SEP according to the model
    convention (cls_token_at_end for XLNet-style, sep_token_extra for
    RoBERTa-style), pads on the left or right, and maps labels to ids
    ('classification') or floats ('regression').
    """
    label_map = {label: i for (i, label) in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ((ex_index % 10000) == 0):
            logger.info(('Writing example %d of %d' % (ex_index, len(examples))))
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Reserve room for CLS + 2x SEP (+1 extra SEP when required).
            special_tokens_count = (4 if sep_token_extra else 3)
            _truncate_seq_pair(tokens_a, tokens_b, (max_seq_length - special_tokens_count))
        else:
            # Reserve room for CLS + SEP (+1 extra SEP when required).
            special_tokens_count = (3 if sep_token_extra else 2)
            if (len(tokens_a) > (max_seq_length - special_tokens_count)):
                tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]
        tokens = (tokens_a + [sep_token])
        if sep_token_extra:
            tokens += [sep_token]
        segment_ids = ([sequence_a_segment_id] * len(tokens))
        if tokens_b:
            tokens += (tokens_b + [sep_token])
            segment_ids += ([sequence_b_segment_id] * (len(tokens_b) + 1))
        # CLS goes last for XLNet-style models, first otherwise.
        if cls_token_at_end:
            tokens = (tokens + [cls_token])
            segment_ids = (segment_ids + [cls_token_segment_id])
        else:
            tokens = ([cls_token] + tokens)
            segment_ids = ([cls_token_segment_id] + segment_ids)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Mask marks real tokens (1 by default; inverted when requested).
        input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
        padding_length = (max_seq_length - len(input_ids))
        if pad_on_left:
            input_ids = (([pad_token] * padding_length) + input_ids)
            input_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + input_mask)
            segment_ids = (([pad_token_segment_id] * padding_length) + segment_ids)
        else:
            input_ids = (input_ids + ([pad_token] * padding_length))
            input_mask = (input_mask + ([(0 if mask_padding_with_zero else 1)] * padding_length))
            segment_ids = (segment_ids + ([pad_token_segment_id] * padding_length))
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        if (output_mode == 'classification'):
            label_id = label_map[example.label]
        elif (output_mode == 'regression'):
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        if (ex_index < 5):
            # Log the first few examples for manual inspection.
            logger.info('*** Example ***')
            logger.info(('guid: %s' % example.guid))
            logger.info(('tokens: %s' % ' '.join([str(x) for x in tokens])))
            logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
            logger.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
            logger.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
            logger.info(('label: %s (id = %d)' % (example.label, label_id)))
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id))
    return features
# NOTE(review): the two lines below look like stripped
# '@pytest.mark.parametrize' decorators — confirm against the original source.
.parametrize('freeze', [True, False])
.parametrize('use_gamma', [True, False])
def test_trainble_config(freeze, use_gamma, flair_lm):
    """Trainable-parameter count reflects the freeze/use_gamma settings."""
    flair_config = FlairConfig(flair_lm=flair_lm, freeze=freeze, use_gamma=use_gamma)
    flair_embedder = flair_config.instantiate()
    expected_num_trainable_params = 0
    if (not freeze):
        # Unfrozen LM contributes all of its parameters.
        expected_num_trainable_params += count_params(flair_lm, return_trainable=False)
    if use_gamma:
        # gamma is a single extra scalar parameter.
        expected_num_trainable_params += 1
    assert (count_params(flair_embedder) == expected_num_trainable_params)
def _get_random_pose_object_with_tf_posebody(num_keypoints: int, frames_min: int=1, frames_max: int=10) -> Pose:
    """Build a Pose whose body is a TensorflowPoseBody filled with random data."""
    data_tensor, data_mask, confidence = _create_random_tensorflow_data(
        frames_min=frames_min, frames_max=frames_max, num_keypoints=num_keypoints)
    pose_body = TensorflowPoseBody(
        fps=10, data=MaskedTensor(tensor=data_tensor, mask=data_mask), confidence=confidence)
    pose_header = _create_pose_header(
        width=10, height=7, depth=0, num_components=3, num_keypoints=num_keypoints)
    return Pose(header=pose_header, body=pose_body)
def attr_acc(gt_box: DetectionBox, pred_box: DetectionBox) -> float:
    """Attribute accuracy for one matched GT/prediction pair.

    Returns NaN when the ground-truth box carries no attribute name (so the
    pair is excluded from averaging), otherwise 1.0 on an exact attribute
    match and 0.0 on a mismatch.
    """
    if gt_box.attribute_name == '':
        return np.nan
    return float(pred_box.attribute_name == gt_box.attribute_name)
def make_plots(statistics_file):
    """Plot SSIM/LPIPS statistics for Fourier-feature networks vs. a NeRF baseline.

    Loads the JSON produced by the statistics step, draws one subplot row per
    entry of the module-level ``configX`` and one column per metric, and also
    copies the reference / best-LPIPS / worst-LPIPS rendered images into the
    output folder.

    NOTE(review): depends on the module-level ``configX``, ``networkX`` and
    ``fourierX`` tables; the figure is saved under a 'Fourier-SSIM' filename
    even though it contains both metrics.
    """
    print('\n Make Plots')
    with open(statistics_file, 'r') as f:
        stats = json.load(f)
    # All outputs are written next to the statistics file.
    output_folder = os.path.split(statistics_file)[0]
    FILETYPE = 'eps'
    numRows = len(configX)
    statNames = ['SSIM $\\uparrow$', 'LPIPS $\\downarrow$']
    statTags = ['ssim', 'lpips']
    numCols = len(statTags)
    (fig, axs) = plt.subplots(numRows, numCols, squeeze=False, sharex=True, figsize=(7, (1 + (2 * numRows))))
    legend_handles = []
    legend_names = []
    for row in range(numRows):
        local_stat = stats[row]
        axs[(row, 0)].set_ylabel(configX[row][0])
        for (col, (name, tag)) in enumerate(zip(statNames, statTags)):
            ax = axs[(row, col)]
            if (row == 0):
                ax.set_title(name)
            (X, Xlabel) = (None, None)
            # One error-bar curve per network configuration.
            for (network_label, network_channels, network_layers) in networkX:
                X = []
                Xlabel = []
                Y = []
                err = []
                # Every Fourier-std setting except the last entry, which is
                # the NeRF baseline and is plotted separately below.
                for (i, (fn, f)) in enumerate(fourierX[:(- 1)]):
                    filename = ('fourier-world-%s-%s-%s' % (configX[row][0], network_label, fn))
                    (y, e) = local_stat[filename][tag]
                    X.append(i)
                    Xlabel.append(('%.1f' % f))
                    Y.append(y)
                    err.append(e)
                h = ax.errorbar(X, Y, yerr=err)
                # NeRF baseline, offset to the right and drawn in the same color.
                filename = ('fourier-world-%s-%s-%s' % (configX[row][0], network_label, fourierX[(- 1)][0]))
                (y, e) = local_stat[filename][tag]
                ax.errorbar([(X[(- 1)] + 1.5)], [y], yerr=[e], color=h[0].get_color(), fmt='.')
                X.append((X[(- 1)] + 1.5))
                Xlabel.append('NeRF')
                # Collect legend entries once (top-left subplot only).
                if ((row == 0) and (col == 0)):
                    legend_handles.append(h)
                    legend_names.append(f'{network_channels} channels, {network_layers} layers')
            ax.set_xticks(X)
            ax.set_xticklabels(Xlabel)
            ax.set_xlabel('Fourier std $\\sigma^2$')
        # Locate the best and worst LPIPS runs of this row and copy their
        # first rendered frame (plus the reference) for side-by-side viewing.
        tag = 'lpips'
        worst_lpips = 0
        worst_filename = None
        best_lpips = 1
        best_filename = None
        for (network_label, network_channels, network_layers) in networkX:
            for (i, (fn, f)) in enumerate(fourierX[:(- 1)]):
                filename = ('fourier-world-%s-%s-%s' % (configX[row][0], network_label, fn))
                (y, _) = local_stat[filename][tag]
                if (y < best_lpips):
                    best_lpips = y
                    best_filename = filename
                if (y > worst_lpips):
                    worst_lpips = y
                    worst_filename = filename
        shutil.copyfile(os.path.join(output_folder, ('images_%s/reference/reference000.png' % configX[row][0])), os.path.join(output_folder, ('%s_reference.png' % configX[row][0])))
        shutil.copyfile(os.path.join(output_folder, ('images_%s/%s/img000.png' % (configX[row][0], best_filename))), os.path.join(output_folder, ('%s_best.png' % configX[row][0])))
        shutil.copyfile(os.path.join(output_folder, ('images_%s/%s/img000.png' % (configX[row][0], worst_filename))), os.path.join(output_folder, ('%s_worst.png' % configX[row][0])))
    # Shared legend below the grid; passed as extra artist so it is not clipped.
    lgd = fig.legend(legend_handles, legend_names, loc='upper center', bbox_to_anchor=(0.5, 0.05), ncol=len(legend_handles))
    fig.savefig(os.path.join(output_folder, ('Fourier-SSIM.%s' % FILETYPE)), bbox_inches='tight', bbox_extra_artists=(lgd,))
    print('Done')
    plt.show()
class G2PModel(object):
    """Grapheme-to-phoneme Transformer wrapper built on tensor2tensor.

    Supports training, interactive decoding, batch decoding from a file,
    evaluation (WER/accuracy against a ground-truth map), and freezing /
    loading a frozen inference graph.

    Fixes applied to the original:
      * ``res_iter.next()`` (Python-2-only iterator API) -> ``next(res_iter)``.
      * ``interactive()``: the prompt result was discarded and an undefined
        ``word`` was then referenced (NameError).
      * ``evaluate()``: ``self.calc_errors(g2p_gt_map, self.file_path)`` passed
        an undefined name with the wrong arity; ``calc_errors`` takes only the
        file path and reads ``self.g2p_gt_map``.
    """

    def __init__(self, params, file_path='', is_training=False):
        """Register the problem and prepare the estimator / frozen graph."""
        usr_dir.import_usr_dir(os.path.dirname(os.path.abspath(__file__)))
        self.params = params
        self.file_path = file_path
        if not os.path.exists(self.params.model_dir):
            os.makedirs(self.params.model_dir)
        self.problem = registry._PROBLEMS[self.params.problem_name](self.params.model_dir, file_path=file_path, is_training=is_training)
        trainer_utils.log_registry()
        self.frozen_graph_filename = os.path.join(self.params.model_dir, 'frozen_model.pb')
        # `first_ex` marks the warm-up decode used only to build the input graph.
        self.first_ex = False
        if is_training:
            self.train_preprocess_file_path, self.dev_preprocess_file_path = None, None
            self.estimator, self.decode_hp = self.__prepare_decode_model()
        elif os.path.exists(self.frozen_graph_filename):
            self.estimator, self.decode_hp = self.__prepare_decode_model()
            self.__load_graph()
            self.checkpoint_path = tf.train.latest_checkpoint(self.params.model_dir)
        else:
            self.estimator, self.decode_hp = self.__prepare_decode_model()

    def prepare_datafiles(self, train_path, dev_path):
        """Generate preprocessed train/dev files for the problem."""
        self.train_preprocess_file_path, self.dev_preprocess_file_path = self.problem.generate_preprocess_data(train_path, dev_path)

    def __prepare_decode_model(self):
        """Create the estimator and decoding hyperparameters."""
        hparams = trainer_utils.create_hparams(self.params.hparams_set, self.params.data_dir, passed_hparams=self.params.hparams)
        estimator, _ = g2p_trainer_utils.create_experiment_components(params=self.params, hparams=hparams, run_config=trainer_utils.create_run_config(self.params.model_dir), problem_instance=self.problem)
        decode_hp = decoding.decode_hparams(self.params.decode_hparams)
        decode_hp.add_hparam('shards', 1)
        return (estimator, decode_hp)

    def __prepare_interactive_model(self):
        """Build the input graph / monitored session for interactive decoding.

        Runs one warm-up decode (``first_ex``) to populate ``self.inputs``
        before the input_fn is constructed.
        """
        word = self.__get_word()
        self.first_ex = True
        self.decode_word(word)
        self.first_ex = False
        prob_choice = np.array(0).astype(np.int32)

        def input_fn():
            gen_fn = make_input_fn(self.inputs, prob_choice)
            example = gen_fn()
            example = decoding._interactive_input_tensor_to_features_dict(example, self.estimator.params)
            return example

        self.input_fn = input_fn
        if os.path.exists(self.frozen_graph_filename):
            # The frozen graph is driven via __run_op instead of a session here.
            return
        with estimator_lib.ops.Graph().as_default() as g:
            self.features = self.estimator._get_features_from_input_fn(input_fn, estimator_lib.model_fn_lib.ModeKeys.PREDICT)
            hooks = estimator_lib._check_hooks_type(None)
            checkpoint_path = estimator_lib.saver.latest_checkpoint(self.params.model_dir)
            if not checkpoint_path:
                raise ValueError('Could not find trained model in model_dir: {}.'.format(self.params.model_dir))
            estimator_lib.random_seed.set_random_seed(self.estimator._config.tf_random_seed)
            self.estimator._create_and_assert_global_step(g)
            self.estimator_spec = self.estimator._call_model_fn(self.features, None, estimator_lib.model_fn_lib.ModeKeys.PREDICT, self.estimator.config)
            self.mon_sess = estimator_lib.training.MonitoredSession(session_creator=estimator_lib.training.ChiefSessionCreator(checkpoint_filename_with_path=checkpoint_path, scaffold=self.estimator_spec.scaffold, config=self.estimator._session_config), hooks=hooks)
            pronunciations = self.decode_word(word)
            print('Pronunciations: {}'.format(pronunciations))

    def decode_word(self, word):
        """Decode a single word into one or more pronunciations.

        During the warm-up pass (``self.first_ex``) only ``self.inputs`` is
        prepared and no prediction is run.
        """
        num_samples = 1
        decode_length = 100
        p_hparams = self.estimator.params.problems[0]
        vocabulary = p_hparams.vocabulary['inputs']
        # Inputs are padded to a fixed-size buffer expected by the input_fn.
        const_array_size = 10000
        input_ids = vocabulary.encode(word)
        input_ids.append(text_encoder.EOS_ID)
        self.inputs = [num_samples, decode_length, len(input_ids)] + input_ids
        assert len(self.inputs) < const_array_size
        self.inputs += [0] * (const_array_size - len(self.inputs))
        if self.first_ex:
            return
        res_iter = self.estimator.predict(self.input_fn)
        # FIX: `.next()` is Python-2-only; use the built-in next().
        result = next(res_iter)
        pronunciations = []
        if self.decode_hp.return_beams:
            beams = np.split(result['outputs'], self.decode_hp.beam_size, axis=0)
            for k, beam in enumerate(beams):
                tf.logging.info('BEAM %d:' % k)
                beam_string = self.problem.target_vocab.decode(decoding._save_until_eos(beam, is_image=False))
                pronunciations.append(beam_string)
                tf.logging.info(beam_string)
        elif self.decode_hp.identity_output:
            tf.logging.info(' '.join(map(str, result['outputs'].flatten())))
        else:
            res = result['outputs'].flatten()
            # Trim everything after the EOS marker.
            if text_encoder.EOS_ID in res:
                index = list(res).index(text_encoder.EOS_ID)
                res = res[0:index]
            pronunciations.append(self.problem.target_vocab.decode(res))
        return pronunciations

    def __run_op(self, sess, decode_op, feed_input):
        """Restore the checkpoint into `sess` and run `decode_op` on one input."""
        saver = tf.train.import_meta_graph((self.checkpoint_path + '.meta'), import_scope=None, clear_devices=True)
        saver.restore(sess, self.checkpoint_path)
        # Creates the 'inp_decode' placeholder so the feed name below resolves.
        inp = tf.placeholder(tf.string, name='inp_decode')[0]
        results = sess.run(decode_op, feed_dict={'inp_decode:0': [feed_input]})
        return results

    def __get_word(self):
        """Prompt for one word on stdin; returns '' on EOF / empty input."""
        word = ''
        try:
            word = input('> ')
            if not issubclass(type(word), text_type):
                word = text_type(word, encoding='utf-8', errors='replace')
        except EOFError:
            pass
        if not word:
            pass
        return word

    def train(self):
        """Run training with the previously prepared preprocess files."""
        g2p_trainer_utils.run(params=self.params, problem_instance=self.problem, train_preprocess_file_path=self.train_preprocess_file_path, dev_preprocess_file_path=self.dev_preprocess_file_path)

    def interactive(self):
        """Interactive decoding loop: read a word, print its pronunciations."""
        self.__prepare_interactive_model()
        if os.path.exists(self.frozen_graph_filename):
            with tf.Session(graph=self.graph) as sess:
                inp = tf.placeholder(tf.string, name='inp_decode')[0]
                decode_op = tf.py_func(self.decode_word, [inp], tf.string)
                while True:
                    word = self.__get_word()
                    result = self.__run_op(sess, decode_op, word)
                    print(('output: ' + result))
        else:
            while not self.mon_sess.should_stop():
                # FIX: the prompt result was discarded and an undefined `word`
                # was referenced; bind it.
                word = self.__get_word()
                pronunciations = self.decode_word(word)
                print('Pronunciations: {}'.format(pronunciations))

    def decode(self, output_file_path):
        """Decode the words in `self.file_path`, optionally writing results."""
        if os.path.exists(self.frozen_graph_filename):
            with tf.Session(graph=self.graph) as sess:
                inp = tf.placeholder(tf.string, name='inp_decode')[0]
                decode_op = tf.py_func(self.__decode_from_file, [inp], [tf.string, tf.string])
                [inputs, decodes] = self.__run_op(sess, decode_op, self.file_path)
        else:
            inputs, decodes = self.__decode_from_file(self.file_path)
        if output_file_path:
            tf.logging.info('Writing decodes into %s' % output_file_path)
            # NOTE(review): `outfile` is never closed here; consider a
            # context manager if tf.gfile.Open supports it in this TF version.
            outfile = tf.gfile.Open(output_file_path, 'w')
            if self.decode_hp.return_beams:
                for index in range(len(inputs)):
                    for res in decodes[index]:
                        outfile.write(('%s\t%s%s' % (inputs[index], res, self.decode_hp.delimiter)))
            else:
                for index in range(len(inputs)):
                    outfile.write(('%s\t%s%s' % (inputs[index], decodes[index], self.decode_hp.delimiter)))

    def evaluate(self):
        """Compute WER/accuracy on `self.file_path` against ground truth."""
        words, pronunciations = [], []
        for case in self.problem.generator(self.file_path, self.problem.source_vocab, self.problem.target_vocab):
            word = self.problem.source_vocab.decode(case['inputs']).replace(EOS, '').strip()
            pronunciation = self.problem.target_vocab.decode(case['targets']).replace(EOS, '').strip()
            words.append(word)
            pronunciations.append(pronunciation)
        self.g2p_gt_map = create_g2p_gt_map(words, pronunciations)
        if os.path.exists(self.frozen_graph_filename):
            with tf.Session(graph=self.graph) as sess:
                inp = tf.placeholder(tf.string, name='inp_decode')[0]
                decode_op = tf.py_func(self.calc_errors, [inp], [tf.int64, tf.int64])
                [correct, errors] = self.__run_op(sess, decode_op, self.file_path)
        else:
            # FIX: the original passed an undefined `g2p_gt_map` and the wrong
            # number of arguments; calc_errors reads self.g2p_gt_map itself.
            correct, errors = self.calc_errors(self.file_path)
        print(('Words: %d' % (correct + errors)))
        print(('Errors: %d' % errors))
        print(('WER: %.3f' % (float(errors) / (correct + errors))))
        print(('Accuracy: %.3f' % float((1.0 - (float(errors) / (correct + errors))))))
        return self.g2p_gt_map

    def freeze(self):
        """Freeze the latest checkpoint into a self-contained GraphDef."""
        checkpoint = tf.train.get_checkpoint_state(self.params.model_dir)
        input_checkpoint = checkpoint.model_checkpoint_path
        absolute_model_folder = '/'.join(input_checkpoint.split('/')[:(- 1)])
        output_graph = (absolute_model_folder + '/frozen_model.pb')
        # Attention softmax outputs kept as the graph's exported endpoints.
        output_node_names = ['transformer/body/model/parallel_0/body/decoder/layer_0/self_attention/multihead_attention/dot_product_attention/Softmax', 'transformer/body/model/parallel_0/body/decoder/layer_0/encdec_attention/multihead_attention/dot_product_attention/Softmax', 'transformer/body/model/parallel_0/body/decoder/layer_1/self_attention/multihead_attention/dot_product_attention/Softmax', 'transformer/body/model/parallel_0/body/decoder/layer_1/encdec_attention/multihead_attention/dot_product_attention/Softmax']
        clear_devices = True
        saver = tf.train.import_meta_graph((input_checkpoint + '.meta'), clear_devices=clear_devices)
        graph = tf.get_default_graph()
        input_graph_def = graph.as_graph_def()
        # NOTE(review): hard-coded debug dump path; remove or parameterize.
        with open('/home/nurtas/input_nodes.txt', 'w') as ofile:
            for op in graph.get_operations():
                ofile.write((op.name + '\n'))
        with tf.Session() as sess:
            saver.restore(sess, input_checkpoint)
            output_graph_def = graph_util.convert_variables_to_constants(sess, input_graph_def, output_node_names, variable_names_blacklist=['global_step'])
            with tf.gfile.GFile(output_graph, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print(('%d ops in the final graph.' % len(output_graph_def.node)))

    def __load_graph(self):
        """Load the frozen GraphDef into `self.graph`."""
        with tf.gfile.GFile(self.frozen_graph_filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        with tf.Graph().as_default() as self.graph:
            tf.import_graph_def(graph_def, name='import')

    def __decode_from_file(self, filename):
        """Decode all words in `filename`; returns [inputs, decodes]."""
        if not self.decode_hp.batch_size:
            self.decode_hp.batch_size = 32
            tf.logging.info(('decode_hp.batch_size not specified; default=%d' % self.decode_hp.batch_size))
        hparams = self.estimator.params
        problem_id = self.decode_hp.problem_idx
        inputs_vocab = hparams.problems[problem_id].vocabulary['inputs']
        targets_vocab = hparams.problems[problem_id].vocabulary['targets']
        problem_name = 'grapheme_to_phoneme_problem'
        tf.logging.info('Performing decoding from a file.')
        inputs = _get_inputs(filename)
        num_decode_batches = (((len(inputs) - 1) // self.decode_hp.batch_size) + 1)

        def input_fn():
            input_gen = _decode_batch_input_fn(problem_id, num_decode_batches, inputs, inputs_vocab, self.decode_hp.batch_size, self.decode_hp.max_input_size)
            gen_fn = decoding.make_input_fn_from_generator(input_gen)
            example = gen_fn()
            return decoding._decode_input_tensor_to_features_dict(example, hparams)

        decodes = []
        result_iter = self.estimator.predict(input_fn)
        for result in result_iter:
            if self.decode_hp.return_beams:
                beam_decodes = []
                output_beams = np.split(result['outputs'], self.decode_hp.beam_size, axis=0)
                for k, beam in enumerate(output_beams):
                    tf.logging.info(('BEAM %d:' % k))
                    decoded_outputs, _ = decoding.log_decode_results(result['inputs'], beam, problem_name, None, inputs_vocab, targets_vocab)
                    beam_decodes.append(decoded_outputs)
                decodes.append(beam_decodes)
            else:
                decoded_outputs, _ = decoding.log_decode_results(result['inputs'], result['outputs'], problem_name, None, inputs_vocab, targets_vocab)
                decodes.append(decoded_outputs)
        return [inputs, decodes]

    def calc_errors(self, decode_file_path):
        """Count correct/incorrect decodes against `self.g2p_gt_map`."""
        inputs, decodes = self.__decode_from_file(decode_file_path)
        correct, errors = 0, 0
        for index, word in enumerate(inputs):
            if self.decode_hp.return_beams:
                # A word counts as correct if any beam matches a ground truth.
                beam_correct_found = False
                for beam_decode in decodes[index]:
                    if beam_decode in self.g2p_gt_map[word]:
                        beam_correct_found = True
                        break
                if beam_correct_found:
                    correct += 1
                else:
                    errors += 1
            elif decodes[index] in self.g2p_gt_map[word]:
                correct += 1
            else:
                errors += 1
        return (correct, errors)
def SMKernel(Q, input_dim, active_dims=None, variances=None, frequencies=None, lengthscales=None, max_freq=1.0, max_len=1.0, ARD=False):
    """Spectral Mixture kernel: the sum of Q spectral-mixture components.

    Hyperparameters not supplied are randomly initialised: equal variances
    1/Q, frequencies uniform in [0, max_freq), and lengthscales drawn as
    |N(0, 1)| * max_len (per-dimension when ARD is set).
    """
    if variances is None:
        variances = [1.0 / Q] * Q
    if frequencies is None:
        frequencies = [np.random.rand(input_dim) * max_freq for _ in range(Q)]
    if lengthscales is None:
        ls_size = input_dim if ARD else 1
        lengthscales = [np.abs(max_len * np.random.randn(ls_size)) for _ in range(Q)]
    components = [
        SMKernelComponent(input_dim, active_dims=active_dims, variance=variances[q],
                          frequency=frequencies[q], lengthscales=lengthscales[q], ARD=ARD)
        for q in range(Q)
    ]
    return Sum(components)
class NNEmptyEntityPredictor():
    """Nearest-neighbour predictor for questions whose logical forms contain
    no entities: answers a query with the logical form / answer set of the
    most spaCy-similar entity-free training question.

    FIX: the original had an unterminated string literal
    ``if ('^^ in s_expr):`` — restored to ``if ('^^' in s_expr):`` (skip
    s-expressions containing typed literals).
    """

    def __init__(self):
        self.nlp = spacy.load('en_core_web_lg')
        self.ref = self.construct_reference()
        # Pre-compute a spaCy Doc for each reference question for similarity.
        self.ref = [(x + (self.nlp(x[0]),)) for x in self.ref]

    def construct_reference(self):
        """Collect (question, s_expression, answer_set) triples from training
        examples whose logical form contains no entities."""
        dataset = load_json('outputs/grailqa_v1.0_train.json')
        empty_pairs = []
        for (i, d) in enumerate(dataset):
            s_expr = d['s_expression']
            toks = tokenize_s_expr(s_expr)
            entities = [x for x in toks if (x.startswith('m.') or x.startswith('g.'))]
            # Skip s-expressions containing typed literals ('^^').
            if ('^^' in s_expr):
                continue
            # Skip unanswerable examples.
            if (d['answer'] == 'null'):
                continue
            if (len(entities) == 0):
                answer_set = [a['answer_argument'] for a in d['answer']]
                empty_pairs.append((d['question'], d['s_expression'], answer_set))
        return empty_pairs

    def predict(self, qid, query):
        """Return the prediction of the most similar reference question."""
        scores = []
        x = self.nlp(query)
        for (i, y) in enumerate(self.ref):
            s = x.similarity(y[(- 1)])
            scores.append((i, s))
        scores.sort(key=(lambda x: x[1]), reverse=True)
        predicted_idx = scores[0][0]
        pred = self.ref[predicted_idx]
        return {'qid': qid, 'logical_form': pred[1], 'answer': pred[2]}
class Dataset(object):
    """Toy sequence dataset with an 80/20 train/test split and random batching."""

    def __init__(self, dataset):
        # K is the dataset-specific parameter passed to prepare_dataset.
        self.K = 3
        if dataset != 'synthetic':
            assert False, 'does not exists dataset: {}.'.format(dataset)
        seq_list, label_list = prepare_dataset(self.K)
        # Sequence length taken from the first sample.
        self.L = seq_list[0].shape[0]
        self.seq_list, self.label_list = shuffle_samples(seq_list, label_list)
        n_training = int(len(self.seq_list) * 0.8)
        self.train_seq = np.array(self.seq_list[:n_training])
        self.train_label = self.label_list[:n_training]
        self.test_seq = np.array(self.seq_list[n_training:])
        self.test_label = self.label_list[n_training:]
        print('dataset size: train={}, test={}'.format(len(self.train_seq), len(self.test_seq)))

    def gen_next_batch(self, batch_size, is_train_set=True, epoch=None, iteration=None):
        """Yield (x, y, idxs) batches, sampled without replacement per batch.

        The number of batches is derived from `epoch` (passes over the data)
        or given directly via `iteration`.
        """
        if is_train_set == True:
            x, y = self.train_seq, self.train_label
        else:
            x, y = self.test_seq, self.test_label
        assert len(x) >= batch_size, 'batch size must be smaller than data size: {}.'.format(len(x))
        if epoch != None:
            until = math.ceil(float(epoch * len(x)) / float(batch_size))
        elif iteration != None:
            until = iteration
        else:
            assert False, 'epoch or iteration must be set.'
        index_pool = list(range(len(x)))
        batch_counter = 0
        while batch_counter <= until:
            batch_idxs = random.sample(index_pool, batch_size)
            batch_counter += 1
            yield (x[batch_idxs], y[batch_idxs], batch_idxs)
class PAU_VGG(nn.Module):
    """VGG-style CNN (CIFAR-100 head) using PAU activations instead of ReLU.

    The architecture is read from the module-level `cfg` table keyed by
    `vgg_name`; 'M' entries are max-pool layers, integers are conv widths.
    """

    def __init__(self, vgg_name):
        super(PAU_VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 100)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Translate the config list into a Sequential feature extractor."""
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    PAU(),
                ])
                in_channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
class ResNet(nn.Module):
    """ImageNet-style ResNet whose classifier can be a normalised (cosine)
    linear layer; forward() returns (logits, pooled features)."""

    def __init__(self, block, layers, num_classes=1000, use_norm=True):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Standard ResNet stem: 7x7 conv -> BN -> ReLU -> 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        if use_norm:
            # Cosine classifier (weight/feature-normalised linear layer).
            self.fc = NormedLinear(512 * block.expansion, num_classes)
        else:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He initialisation for convolutions; BN starts at scale 1, shift 0.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stage.append(block(self.inplanes, planes))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        features = x.view(x.size(0), -1)
        return (self.fc(features), features)
def find_group_ends(width, next):
    """Coroutine stage of a Wadler-style pretty printer.

    Receives a stream of (tag, position) events and buffers everything
    between GBegin/GEnd markers so that each group can be forwarded as a
    unit once its end (or the fact that it cannot fit within `width`) is
    known. Flushed events are sent to the downstream coroutine `next`.

    NOTE(review): `next` shadows the builtin; it is the downstream coroutine,
    primed here via the Python-2-style `.next()` call.
    """
    next.next()
    # Stack of pending groups: (horizon, buffered-events) pairs.
    bufs = deque()
    while True:
        event = (yield)
        if bufs:
            if (event[0] == Doc.GEnd):
                # Close the innermost open group: wrap its buffer in
                # GBegin/GEnd carrying the end position.
                (_, buf) = bufs.pop()
                buf.append_left((Doc.GBegin, event[1]))
                buf.append((Doc.GEnd, event[1]))
                if bufs:
                    # Still inside an enclosing group: merge into its buffer.
                    bufs[(- 1)][1].extend(buf)
                else:
                    # Outermost group closed: flush downstream.
                    for e in buf:
                        next.send(e)
            else:
                if (event[0] == Doc.GBegin):
                    # Open a nested group; record the position past which it
                    # can no longer fit on the current line.
                    bufs.append(((event[1] + width), Buf()))
                else:
                    bufs[(- 1)][1].append(event)
                    # Evict groups that provably cannot fit (horizon passed)
                    # or when too many groups are stacked; emit their GBegin
                    # with an unknown end (None) and flush their contents.
                    while ((bufs[0][0] < event[1]) or (len(bufs) > width)):
                        next.send((Doc.GBegin, None))
                        (_, buf) = bufs.popleft()
                        for e in buf:
                            next.send(e)
                        if (not bufs):
                            break
        elif (event[0] == Doc.GBegin):
            # First group opens: start buffering.
            bufs.append(((event[1] + width), Buf()))
        else:
            # Not inside any group: pass events straight through.
            next.send(event)
def consolidate_scores(cv_results, scores, metric):
    """Append 'mean std' of the cross-validation results for `metric` to
    the `scores[metric]` list and return `scores`.

    MAPE is formatted with two decimals, every other metric with one.

    FIX: the original referenced an undefined name `value` (NameError on
    every call); it is reconstructed here as `cv_results[metric]` — an
    array-like of per-fold scores. TODO(review): confirm against the caller.
    """
    value = cv_results[metric]
    if (metric == 'MAPE'):
        scores[metric].append(f'{value.mean():.2f} {value.std():.2f}')
    else:
        scores[metric].append(f'{value.mean():.1f} {value.std():.1f}')
    return scores
def _alg_key(self, algorithm=None, recompute=False):
if recompute:
algorithm = self._get_algorithm(algorithm)
return algorithm |
class SA(nn.Module):
    """Transformer self-attention block: multi-head attention followed by a
    feed-forward network, each wrapped with residual + dropout + LayerNorm
    (post-norm arrangement)."""

    def __init__(self, __C):
        super().__init__()
        self.mhatt = MHAtt(__C)
        self.ffn = FFN(__C)
        self.dropout1 = nn.Dropout(__C.DROPOUT_R)
        self.norm1 = LayerNorm(__C.HIDDEN_SIZE)
        self.dropout2 = nn.Dropout(__C.DROPOUT_R)
        self.norm2 = LayerNorm(__C.HIDDEN_SIZE)

    def forward(self, y, y_mask):
        # Self-attention sub-layer with residual connection.
        attended = self.mhatt(y, y, y, y_mask)
        y = self.norm1(y + self.dropout1(attended))
        # Feed-forward sub-layer with residual connection.
        transformed = self.ffn(y)
        y = self.norm2(y + self.dropout2(transformed))
        return y
# FIX: the decorator below had lost its '@' prefix in the original dump.
@_HEADS_REGISTRY.register()
class CascadeROIHeads(StandardROIHeads):
    """Cascade R-CNN ROI heads: a chain of box heads trained at progressively
    higher IoU thresholds; at inference, per-stage class scores are averaged.

    NOTE(review): decorators were stripped to bare residue lines in the
    original ('_HEADS_REGISTRY.register()', '_grad()'); restored here as
    '@_HEADS_REGISTRY.register()', '@classmethod' (from_config /
    _init_box_head take `cls` and call `super()`), and '@torch.no_grad()'
    following upstream detectron2 — confirm against the project's imports.
    """

    def __init__(self, *, box_in_features: List[str], box_pooler: ROIPooler, box_heads: List[nn.Module], box_predictors: List[nn.Module], proposal_matchers: List[Matcher], **kwargs):
        """
        Args:
            box_in_features: names of the feature maps consumed by the heads.
            box_pooler: ROI pooler shared by all cascade stages.
            box_heads / box_predictors / proposal_matchers: one per stage.
        """
        assert ('proposal_matcher' not in kwargs), "CascadeROIHeads takes 'proposal_matchers=' for each stage instead of one 'proposal_matcher='."
        # The base class expects a single matcher; hand it the first stage's.
        kwargs['proposal_matcher'] = proposal_matchers[0]
        num_stages = self.num_cascade_stages = len(box_heads)
        box_heads = nn.ModuleList(box_heads)
        box_predictors = nn.ModuleList(box_predictors)
        assert (len(box_predictors) == num_stages), f'{len(box_predictors)} != {num_stages}!'
        assert (len(proposal_matchers) == num_stages), f'{len(proposal_matchers)} != {num_stages}!'
        super().__init__(box_in_features=box_in_features, box_pooler=box_pooler, box_head=box_heads, box_predictor=box_predictors, **kwargs)
        self.proposal_matchers = proposal_matchers

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        # Replaced by the per-stage matchers built in _init_box_head.
        ret.pop('proposal_matcher')
        return ret

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        """Build per-stage pooler, heads, predictors and matchers from cfg."""
        in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales = tuple(((1.0 / input_shape[k].stride) for k in in_features))
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS
        assert (len(cascade_bbox_reg_weights) == len(cascade_ious))
        assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, 'CascadeROIHeads only support class-agnostic regression now!'
        assert (cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0])
        in_channels = [input_shape[f].channels for f in in_features]
        # All selected feature maps must have the same channel count.
        assert (len(set(in_channels)) == 1), in_channels
        in_channels = in_channels[0]
        box_pooler = ROIPooler(output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type)
        pooled_shape = ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution)
        (box_heads, box_predictors, proposal_matchers) = ([], [], [])
        for (match_iou, bbox_reg_weights) in zip(cascade_ious, cascade_bbox_reg_weights):
            box_head = build_box_head(cfg, pooled_shape)
            box_heads.append(box_head)
            box_predictors.append(FastRCNNOutputLayers(cfg, box_head.output_shape, box2box_transform=Box2BoxTransform(weights=bbox_reg_weights)))
            proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False))
        return {'box_in_features': in_features, 'box_pooler': box_pooler, 'box_heads': box_heads, 'box_predictors': box_predictors, 'proposal_matchers': proposal_matchers}

    def forward(self, images, features, proposals, targets=None):
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)
        if self.training:
            losses = self._forward_box(features, proposals, targets)
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return (proposals, losses)
        else:
            pred_instances = self._forward_box(features, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return (pred_instances, {})

    def _forward_box(self, features, proposals, targets=None):
        """Run all cascade stages; training returns per-stage losses,
        inference returns instances scored by the stage-averaged probs."""
        features = [features[f] for f in self.box_in_features]
        head_outputs = []
        prev_pred_boxes = None
        image_sizes = [x.image_size for x in proposals]
        for k in range(self.num_cascade_stages):
            if (k > 0):
                # Later stages refine the previous stage's predicted boxes.
                proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes)
                if self.training:
                    proposals = self._match_and_label_boxes(proposals, k, targets)
            predictions = self._run_stage(features, proposals, k)
            prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
            head_outputs.append((self.box_predictor[k], predictions, proposals))
        if self.training:
            losses = {}
            storage = get_event_storage()
            for (stage, (predictor, predictions, proposals)) in enumerate(head_outputs):
                with storage.name_scope('stage{}'.format(stage)):
                    stage_losses = predictor.losses(predictions, proposals)
                losses.update({(k + '_stage{}'.format(stage)): v for (k, v) in stage_losses.items()})
            return losses
        else:
            # Average class scores across all stages, use last stage's boxes.
            scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
            scores = [(sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)) for scores_per_image in zip(*scores_per_stage)]
            (predictor, predictions, proposals) = head_outputs[(- 1)]
            boxes = predictor.predict_boxes(predictions, proposals)
            (pred_instances, _) = fast_rcnn_inference(boxes, scores, image_sizes, predictor.test_score_thresh, predictor.test_nms_thresh, predictor.test_topk_per_image)
            return pred_instances

    @torch.no_grad()
    def _match_and_label_boxes(self, proposals, stage, targets):
        """Assign GT classes/boxes to proposals using this stage's matcher."""
        (num_fg_samples, num_bg_samples) = ([], [])
        for (proposals_per_image, targets_per_image) in zip(proposals, targets):
            match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, proposals_per_image.proposal_boxes)
            (matched_idxs, proposal_labels) = self.proposal_matchers[stage](match_quality_matrix)
            if (len(targets_per_image) > 0):
                gt_classes = targets_per_image.gt_classes[matched_idxs]
                # Unmatched proposals become background.
                gt_classes[(proposal_labels == 0)] = self.num_classes
                gt_boxes = targets_per_image.gt_boxes[matched_idxs]
            else:
                gt_classes = (torch.zeros_like(matched_idxs) + self.num_classes)
                gt_boxes = Boxes(targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4)))
            proposals_per_image.gt_classes = gt_classes
            proposals_per_image.gt_boxes = gt_boxes
            num_fg_samples.append((proposal_labels == 1).sum().item())
            num_bg_samples.append((proposal_labels.numel() - num_fg_samples[(- 1)]))
        storage = get_event_storage()
        storage.put_scalar('stage{}/roi_head/num_fg_samples'.format(stage), (sum(num_fg_samples) / len(num_fg_samples)))
        storage.put_scalar('stage{}/roi_head/num_bg_samples'.format(stage), (sum(num_bg_samples) / len(num_bg_samples)))
        return proposals

    def _run_stage(self, features, proposals, stage):
        """Pool features for the proposals and run one stage's head/predictor."""
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        # Scale gradients so each stage contributes equally through the pooler.
        box_features = _ScaleGradient.apply(box_features, (1.0 / self.num_cascade_stages))
        box_features = self.box_head[stage](box_features)
        return self.box_predictor[stage](box_features)

    def _create_proposals_from_boxes(self, boxes, image_sizes):
        """Turn predicted boxes into Instances proposals for the next stage."""
        boxes = [Boxes(b.detach()) for b in boxes]
        proposals = []
        for (boxes_per_image, image_size) in zip(boxes, image_sizes):
            boxes_per_image.clip(image_size)
            if self.training:
                # Drop degenerate boxes during training.
                boxes_per_image = boxes_per_image[boxes_per_image.nonempty()]
            prop = Instances(image_size)
            prop.proposal_boxes = boxes_per_image
            proposals.append(prop)
        return proposals
class SynonymPerturbation(TextPerturbation):
    """Word-level synonym substitution using WordNet: each eligible word
    (verb / noun / adverb / adjective) is replaced by a random synonym with
    probability `prob`.

    FIXES: the nested Description class had its decorator stripped to a bare
    '(frozen=True)' line — restored as '@dataclass(frozen=True)'; and
    SOURCE_URI was an unterminated string literal (the URL was lost in the
    dump) — replaced with an empty placeholder below.
    """

    @dataclass(frozen=True)
    class Description(PerturbationDescription):
        prob: float = 0.0

    name: str = 'synonym'
    FILE_NAME: str = 'wordnet_synonyms.json'
    # FIXME(review): the original download URL for wordnet_synonyms.json was
    # truncated out of the source; restore it before use.
    SOURCE_URI: str = ''

    def __init__(self, prob: float):
        # Probability of replacing each individual eligible word.
        self.prob: float = prob
        try:
            self.spacy_model = spacy.load('en_core_web_sm')
        except OSError:
            spacy.cli.download('en_core_web_sm')
            self.spacy_model = spacy.load('en_core_web_sm')
        output_dir = os.path.join('benchmark_output', 'perturbations', self.name)
        Path(output_dir).mkdir(parents=True, exist_ok=True)
        nltk.data.path.append(output_dir)
        try:
            wordnet.ensure_loaded()
        except LookupError:
            if (not os.path.exists(os.path.join(output_dir, 'corpora/wordnet'))):
                nltk.download('wordnet', download_dir=output_dir)
            if (not os.path.exists(os.path.join(output_dir, 'corpora/omw-1.4'))):
                nltk.download('omw-1.4', download_dir=output_dir)
            wordnet.ensure_loaded()
        target_path = os.path.join(output_dir, self.FILE_NAME)
        ensure_file_downloaded(source_url=self.SOURCE_URI, target_path=target_path)
        with open(target_path) as f:
            # Maps "lemma:pos" -> list of synonym strings.
            self.wordnet_synonyms: Dict[(str, List[str])] = json.load(f)

    def description(self) -> PerturbationDescription:
        # NOTE(review): upstream implementations expose this as a @property;
        # kept as a plain method to preserve the original interface here.
        return SynonymPerturbation.Description(name=self.name, robustness=True, prob=self.prob)

    def perturb(self, text: str, rng: Random) -> str:
        """Return `text` with each eligible token replaced by a random
        WordNet synonym with probability `self.prob`, preserving case and
        whitespace."""
        spacy_to_wordnet_pos = {'VERB': 'v', 'NOUN': 'n', 'ADV': 'r', 'ADJ': 's'}
        doc = self.spacy_model(text)
        perturbed_text = ''
        for token in doc:
            word = token.text
            wordnet_pos = spacy_to_wordnet_pos.get(token.pos_)
            synonyms = []
            if wordnet_pos:
                # Look up synonyms for every morphological base of the word.
                for base in wordnet._morphy(word.lower(), wordnet_pos):
                    synonyms.extend(self.wordnet_synonyms.get(f'{base}:{wordnet_pos}', []))
            synonyms = [s for s in synonyms if (s != word.lower())]
            # Deduplicate while preserving order.
            synonyms = list(dict.fromkeys(synonyms))
            if (synonyms and (rng.uniform(0, 1) < self.prob)):
                synonym = rng.choice(synonyms)
                # Preserve the original token's capitalisation.
                word = match_case(word, synonym)
            perturbed_text += (word + token.whitespace_)
        return perturbed_text
def plot_line(df, x, y, col, row, hue, name, ci=None, hue_order=model_names, title=None, xlabel=None, ylabel=None, marker=None):
    """Draw a faceted seaborn line plot of `df` and save it to `name`."""
    grid = sns.relplot(data=df, x=x, y=y, col=col, row=row, hue=hue, kind='line',
                       facet_kws={'sharey': False}, hue_order=hue_order, ci=ci, marker=marker)
    grid.set_titles(title)
    grid.set_ylabels(ylabel, clear_inner=False)
    grid.set_xlabels(xlabel, clear_inner=False)
    grid.despine(left=True)
    # Horizontal legend placed just below the facet grid.
    sns.move_legend(grid, 'lower center', bbox_to_anchor=(0.5, (- 0.02)), ncol=4, title=None, frameon=False, fontsize=11)
    plt.savefig(name, bbox_inches='tight')
    plt.close()
def stretch_loss(inp_nf, out_nf, deform, x=None, npoints=1000, dim=3, use_surf_points=False, invert_sampling=False, loss_type='l2', reduction='mean', weights=1, detach_weight=True, use_rejection=False):
    """Stretch (approximate-isometry) regularizer for a deformation field.

    Penalizes the deviation of J^T J from the identity — where J is the
    Jacobian of `deform` at sampled points — after projecting onto the
    tangent space of the output neural field `out_nf` (so only in-surface
    stretching is penalized). Sample points `x` are drawn via
    `sample_points` when not provided.

    NOTE(review): `inp_nf`/`out_nf` are neural implicit fields and `deform`
    maps output-space points back to input space — inferred from usage;
    confirm against the callers.
    """
    if (x is None):
        (x, weights) = sample_points(npoints, dim=dim, sample_surf_points=use_surf_points, inp_nf=inp_nf, out_nf=out_nf, deform=deform, invert_sampling=invert_sampling, detach_weight=detach_weight, use_rejection=use_rejection)
        (bs, npoints) = (x.size(0), x.size(1))
    else:
        assert (weights is not None)
        if (len(x.size()) == 2):
            # Unbatched input: treat as a single batch of npoints points.
            (bs, npoints) = (1, x.size(0))
        else:
            (bs, npoints) = (x.size(0), x.size(1))
    x = x.view(bs, npoints, dim)
    if x.is_leaf:
        # Gradients w.r.t. the sample positions are needed for the Jacobian.
        x.requires_grad = True
    x.retain_grad()
    y_out = out_nf(x)
    # P projects onto the tangent plane of the output field's level set at x.
    (_, P) = tangential_projection_matrix(y_out, x)
    P = P.view((bs * npoints), dim, dim)
    x_inp = deform(x).view(bs, npoints, dim)
    (J, J_status) = jacobian(x_inp, x)
    J = J.view((bs * npoints), dim, dim)
    I = torch.eye(dim).view(1, dim, dim).to(J)
    # Deviation from isometry: I - J^T J, restricted to the tangent plane.
    diff = (I - torch.bmm(J.transpose(1, 2), J))
    diff = torch.bmm(P.transpose(1, 2), torch.bmm(diff, P))
    # Frobenius norm of the projected deviation, per point.
    F_norm = diff.view((bs * npoints), (- 1)).norm(dim=(- 1), keepdim=False)
    F_norm = F_norm.view(bs, npoints)
    F_norm = (F_norm * weights)
    if (loss_type == 'l2'):
        loss = F.mse_loss(F_norm, torch.zeros_like(F_norm), reduction=reduction)
    elif (loss_type == 'l1'):
        loss = F.l1_loss(F_norm, torch.zeros_like(F_norm), reduction=reduction)
    else:
        raise ValueError
    return loss
def sgn_committee(K, N, alpha, ensemble_type, p_pos, noise_var):
    """Build a committee of K sign-activation members with binary priors.

    ``p_pos`` may be a single float (broadcast to all K members) or a list
    of length K; anything else raises ValueError.
    """
    if isinstance(p_pos, float):
        p_pos = [p_pos for _ in range(K)]
    if not (isinstance(p_pos, list) and len(p_pos) == K):
        raise ValueError(f'p_pos must be a list of length {K}')
    priors = [{'prior_type': 'binary', 'p_pos': p} for p in p_pos]
    # Both layers use sign activations.
    return committee(K, N, alpha, ensemble_type, priors, 'sgn', 'sgn', noise_var)
class MemcachedBackend(BaseStorageBackend):
    """Storage backend that reads file contents from a memcached cluster.

    Requires the (non-pip) ``mc`` client library; ``sys_path``, when given,
    is appended to ``sys.path`` so that ``mc`` can be imported.
    """
    def __init__(self, server_list_cfg, client_cfg, sys_path=None):
        if (sys_path is not None):
            import sys
            sys.path.append(sys_path)
        try:
            import mc
        except ImportError:
            raise ImportError('Please install memcached to enable MemcachedBackend.')
        self.server_list_cfg = server_list_cfg
        self.client_cfg = client_cfg
        # GetInstance returns a client shared for identical configurations.
        self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg)
        # Reusable buffer the client writes fetched values into.
        self._mc_buffer = mc.pyvector()
    def get(self, filepath):
        """Fetch the value stored under ``filepath`` and return it as a buffer."""
        filepath = str(filepath)
        import mc
        self._client.Get(filepath, self._mc_buffer)
        value_buf = mc.ConvertBuffer(self._mc_buffer)
        return value_buf
    def get_text(self, filepath, encoding=None):
        """Text reads are not supported by this backend."""
        raise NotImplementedError
def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build the 'RW' variant of MobileNet-V3.

    ``arch_def`` encodes one list of block-definition strings per stage
    (block type, repeats, kernel, stride, expansion, channels, SE ratio,
    activation).  ``channel_multiplier`` scales every stage's channel count.
    """
    arch_def = [['ds_r1_k3_s1_e1_c16_nre_noskip'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']]
    with layer_config_kwargs(kwargs):
        # Hard-swish activations and hard-sigmoid SE gating, per the paper.
        model_kwargs = dict(block_args=decode_arch_def(arch_def), head_bias=False, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_kwargs=dict(gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True), norm_kwargs=resolve_bn_args(kwargs), **kwargs)
        model = _create_model(model_kwargs, variant, pretrained)
    return model
class SawyerStickPushV1Policy(Policy):
    """Scripted policy for the stick-push manipulation task: grasp the stick,
    then use it to push the object toward the goal position.
    """
    # NOTE(review): the bare `_fully_parsed` below looks like a stripped
    # decorator (e.g. `@assert_fully_parsed`), and `_parse_obs`,
    # `_desired_xyz`, `_grab_pow` take no `self`, suggesting stripped
    # `@staticmethod` decorators — confirm against the original source.
    _fully_parsed
    def _parse_obs(obs):
        # Observation layout: hand (3), stick (3), object (rest), goal (last 3).
        return {'hand_pos': obs[:3], 'stick_pos': obs[3:6], 'obj_pos': obs[6:(- 3)], 'goal_pos': obs[(- 3):]}
    def get_action(self, obs):
        """Return the action array: 3-D hand displacement + gripper power."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_pow': 3})
        # Proportional controller toward the staged target position.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_xyz(o_d), p=10.0)
        action['grab_pow'] = self._grab_pow(o_d)
        return action.array
    def _desired_xyz(o_d):
        """Staged target: hover over stick, descend, lift to object height, push to goal."""
        hand_pos = o_d['hand_pos']
        # Small x-offset so the gripper centers on the stick's grasp point.
        stick_pos = (o_d['stick_pos'] + np.array([(- 0.02), 0.0, 0.0]))
        obj_pos = o_d['obj_pos']
        goal_pos = o_d['goal_pos']
        if (np.linalg.norm((hand_pos[:2] - stick_pos[:2])) > 0.02):
            # Not yet above the stick: hover 10cm over it.
            return (stick_pos + np.array([0.0, 0.0, 0.1]))
        elif ((abs((hand_pos[2] - stick_pos[2])) > 0.05) and (stick_pos[(- 1)] < 0.03)):
            # Above the stick but too high, stick still on the table: descend.
            return (stick_pos + np.array([0.0, 0.0, 0.03]))
        elif (abs(((obj_pos[2] + 0.05) - hand_pos[2])) > 0.01):
            # Holding the stick: raise/lower to pushing height above the object.
            return np.array([hand_pos[0], hand_pos[1], (obj_pos[2] + 0.05)])
        else:
            # Push horizontally toward the goal at the current height.
            return np.array([goal_pos[0], goal_pos[1], hand_pos[2]])
    def _grab_pow(o_d):
        """Gripper power: open until aligned with the stick, then close."""
        hand_pos = o_d['hand_pos']
        stick_pos = (o_d['stick_pos'] + np.array([(- 0.02), 0.0, 0.0]))
        if ((np.linalg.norm((hand_pos[:2] - stick_pos[:2])) > 0.02) or (abs((hand_pos[2] - stick_pos[2])) > 0.1)):
            return 0.0
        else:
            return 0.8
def get_plane_params_in_local(planes, camera_info):
    """Transform plane parameters from world coordinates to a camera's frame.

    Args:
        planes: (N, 3) array of plane parameters in world coordinates
            (normal direction scaled by offset).
        camera_info: dict with 'position' (3-vector translation) and
            'rotation' (a quaternion supporting ``.inverse()``).

    Returns:
        (N, 3) array of plane parameters in the camera's local frame
        (y and z axes flipped to match the local convention).
    """
    tran = camera_info['position']
    rot = camera_info['rotation']
    b = planes
    a = (np.ones((len(planes), 3)) * tran)
    # Point on each plane closest to the camera center, in world coordinates.
    planes_world = ((a + b) - (((a * b).sum(axis=1) / (np.linalg.norm(b, axis=1) ** 2)).reshape((- 1), 1) * b))
    # BUG FIX: the matrix product lost its `@` operator — the rotation matrix
    # was being *called* on the translated points, which raises TypeError.
    end = (quaternion.as_rotation_matrix(rot.inverse()) @ (planes_world - tran).T).T
    planes_local = (end * np.array([1, (- 1), (- 1)]))
    return planes_local
def main(args):
    """Aggregate dialogue JSON files into per-turn statistics dataframes.

    Reads every ``*.json`` dialogue under ``args.file_dir``, flattens each
    into a per-turn dataframe with NLG/image/multimodal indicators and NLP
    statistics, then writes the combined (all / user-only / system-only)
    dataframes plus a per-file summary to the JSON and pickle paths in
    ``args``.
    """
    all_files = glob.glob(args.file_dir + '/*.json')
    stats_df = pd.DataFrame()
    global_df = pd.DataFrame()
    global_user_df = pd.DataFrame()
    global_system_df = pd.DataFrame()
    print('Reading files')
    index = 0
    for dialogue_json in all_files:
        index += 1
        df = read_json_to_df(dialogue_json)
        df_flatten = flatten_json_column(df)
        df_flatten = df_flatten[['speaker', 'type', 'question-type', 'question-subtype', 'nlg', 'images']]
        df_flatten = df_flatten.assign(filename=dialogue_json)
        df_flatten['num_images'] = df_flatten['images'].apply(lambda x: len(x) if isinstance(x, list) else None)
        # Blank out NaNs only for building the state string.
        df = df_flatten.replace(np.nan, '', regex=True)
        df_flatten['state'] = df[['type', 'question-type', 'question-subtype']].apply(lambda x: ','.join(x), axis=1)
        # BUG FIX: the original tested `type(x) is unicode`; `unicode` does not
        # exist on Python 3 (this file uses f-strings elsewhere, so it is
        # Python 3) and raised NameError inside the apply.  Text cells are
        # plain `str`; non-text cells are NaN floats and fall through.
        df_flatten['nlp_stats'] = df_flatten['nlg'].apply(lambda x: nlp_stats(x) if isinstance(x, str) else None)
        df_flatten = flatten_dic_column(df_flatten, 'nlp_stats')
        df_flatten['is_image'] = df_flatten['images'].apply(lambda x: 1 if isinstance(x, list) else 0)
        df_flatten['is_nlg'] = df_flatten['nlg'].apply(lambda x: 1 if isinstance(x, str) else 0)
        # A turn is multimodal when it carries both text and images.
        df_flatten['is_multimodal'] = ((df_flatten['is_nlg'] + df_flatten['is_image']) - 1)
        user_df = df_flatten.loc[(df_flatten['speaker'] == 'user')]
        system_df = df_flatten.loc[(df_flatten['speaker'] == 'system')]
        image_turns = df_flatten['is_image'].sum()
        nlg_turns = df_flatten['is_nlg'].sum()
        multimodal_turns = df_flatten['is_multimodal'].sum()
        total_turns = df_flatten.shape[0]
        user_turns = user_df.shape[0]
        sys_turns = system_df.shape[0]
        user_nlg_turns = user_df['is_nlg'].sum()
        sys_nlg_turns = system_df['is_nlg'].sum()
        local_data = {'filename': dialogue_json, 'total_turns': total_turns, 'image_turns': image_turns, 'nlg_turns': nlg_turns, 'multimodal_turns': multimodal_turns, 'user_turns': user_turns, 'sys_turns': sys_turns, 'user_nlg_turns': user_nlg_turns, 'sys_nlg_turns': sys_nlg_turns}
        local_df = pd.DataFrame(data=local_data, index=[index])
        stats_df = append_df(stats_df, local_df, ignore_index=False)
        global_df = append_df(global_df, df_flatten)
        global_user_df = append_df(global_user_df, user_df)
        global_system_df = append_df(global_system_df, system_df)
    print('Writing files')
    write_df_to_json(global_df, args.output_file_json)
    save_df_pickle(global_df, args.output_file_pkl)
    write_df_to_json(global_user_df, args.output_user_file_json)
    save_df_pickle(global_user_df, args.output_user_file_pkl)
    write_df_to_json(global_system_df, args.output_sys_file_json)
    save_df_pickle(global_system_df, args.output_sys_file_pkl)
    write_df_to_json(stats_df, args.stats_file_json)
    save_df_pickle(stats_df, args.stats_file_pkl)
class Encoder(object):
    """RNN encoder wrapper: builds a cell via ``cell_factory`` and optionally
    adds input/output dropout, then delegates encoding to
    ``rnn_encoder_factory`` under a variable scope.
    """
    def __init__(self, cell_factory, input_size, hidden_size, input_dropout=None, output_dropout=None):
        self.cell_factory = cell_factory
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.cell = cell_factory(hidden_size)
        if input_dropout is not None or output_dropout is not None:
            # DropoutWrapper takes *keep* probabilities; None means keep all.
            keep_input = 1 - (input_dropout or 0.0)
            keep_output = 1 - (output_dropout or 0.0)
            self.cell = DropoutWrapper(self.cell, keep_input, keep_output)
        self.state_size = self.cell.state_size
    def __call__(self, inputs, start_state, scope=None):
        """Run the encoder over ``inputs`` starting from ``start_state``."""
        with vs.variable_scope(scope or 'Encoder'):
            return rnn_encoder_factory(self.cell, inputs, start_state)
def _insert_value(metadata, name, value):
if (value is None):
return metadata
metadata[name] = value
return metadata |
# NOTE(review): the bare `_method` below looks like a stripped decorator;
# several methods of this class also take no `self`/`cls` and read like
# `@staticmethod`s (`normalize`, `_prep`, the interval constructors,
# `_scan_to_intervals`) — confirm against the original source.
_method
class RealSet(UniqueRepresentation, Parent, Set_base, Set_boolean_operators, Set_add_sub_operators):
    """A subset of the real line, stored as a sorted tuple of disjoint
    ``InternalRealInterval`` objects.

    Construction normalizes the interval list before the
    ``UniqueRepresentation`` lookup, so equal sets are identical objects.
    Interval unions/intersections/differences are computed with a sweep
    ("scan") over interval endpoint events.
    """
    def __classcall__(cls, *args, **kwds):
        """Normalize constructor arguments before unique-representation lookup.

        Accepts tuples ``(a, b)`` (open), lists ``[a, b]`` (closed),
        ``InternalRealInterval``/``RealSet`` instances, relational symbolic
        expressions (e.g. ``x > 0``) and manifold (sub)sets.  Manifold
        keywords (``structure``, ``ambient``, ...) instead return a manifold
        object backed by this set.
        """
        normalized = kwds.pop('normalized', False)
        if normalized:
            # Intervals are already sorted and disjoint: construct directly.
            return UniqueRepresentation.__classcall__(cls, *args, normalized=True)
        manifold_keywords = ('structure', 'ambient', 'names', 'coordinate')
        if any((kwds.get(kwd, None) for kwd in manifold_keywords)):
            # Manifold construction path: build the plain set first, then
            # wrap it as (a chart pullback of) a RealLine manifold.
            real_set = cls.__classcall__(cls, *args)
            ambient = kwds.pop('ambient', None)
            structure = kwds.pop('structure', 'differentiable')
            if (structure != 'differentiable'):
                raise NotImplementedError
            from sage.manifolds.differentiable.examples.real_line import RealLine
            if real_set.is_universe():
                if (ambient is None):
                    ambient = RealLine(**kwds)
                else:
                    pass
                return ambient
            name = kwds.pop('name', None)
            latex_name = kwds.pop('latex_name', None)
            if (ambient is None):
                ambient = RealLine(**kwds)
            else:
                pass
            if (name is None):
                name = str(real_set)
            if (latex_name is None):
                from sage.misc.latex import latex
                latex_name = latex(real_set)
            return ambient.manifold().canonical_chart().pullback(real_set, name=name, latex_name=latex_name)
        if kwds:
            raise TypeError(f'RealSet constructors cannot take the keyword arguments {kwds}')
        from sage.structure.element import Expression
        if ((len(args) == 1) and isinstance(args[0], RealSet)):
            # Idempotent: RealSet(RealSet(...)) returns the same object.
            return args[0]
        intervals = []
        if (len(args) == 2):
            # Two bare numbers are shorthand for an open interval.
            try:
                (lower, upper) = args
                lower.n()
                upper.n()
                args = (RealSet._prep(lower, upper),)
            except (AttributeError, ValueError, TypeError):
                pass
        for arg in args:
            if isinstance(arg, tuple):
                # Tuple -> open interval.
                (lower, upper) = RealSet._prep(*arg)
                intervals.append(InternalRealInterval(lower, False, upper, False))
            elif isinstance(arg, list):
                # List -> closed interval.
                (lower, upper) = RealSet._prep(*arg)
                intervals.append(InternalRealInterval(lower, True, upper, True))
            elif isinstance(arg, InternalRealInterval):
                intervals.append(arg)
            elif isinstance(arg, RealSet):
                intervals.extend(arg._intervals)
            elif (isinstance(arg, Expression) and arg.is_relational()):
                # Relational expressions like x > 0 or 1 <= x.
                from operator import eq, ne, lt, gt, le, ge
                def rel_to_interval(op, val):
                    # Convert `x <op> val` into a list of intervals.
                    oo = infinity
                    try:
                        val = val.pyobject()
                    except AttributeError:
                        pass
                    val = RLF(val)
                    if (op == eq):
                        s = [InternalRealInterval(val, True, val, True)]
                    elif (op == gt):
                        s = [InternalRealInterval(val, False, oo, False)]
                    elif (op == ge):
                        s = [InternalRealInterval(val, True, oo, False)]
                    elif (op == lt):
                        s = [InternalRealInterval((- oo), False, val, False)]
                    elif (op == le):
                        s = [InternalRealInterval((- oo), False, val, True)]
                    elif (op == ne):
                        s = [InternalRealInterval((- oo), False, val, False), InternalRealInterval(val, False, oo, False)]
                    else:
                        raise ValueError((str(arg) + ' does not determine real interval'))
                    return [i for i in s if (not i.is_empty())]
                if (arg.lhs().is_symbol() and (arg.rhs().is_numeric() or arg.rhs().is_constant()) and arg.rhs().is_real()):
                    intervals.extend(rel_to_interval(arg.operator(), arg.rhs()))
                elif (arg.rhs().is_symbol() and (arg.lhs().is_numeric() or arg.lhs().is_constant()) and arg.lhs().is_real()):
                    # Constant on the left: mirror the comparison operator.
                    op = arg.operator()
                    if (op == lt):
                        op = gt
                    elif (op == gt):
                        op = lt
                    elif (op == le):
                        op = ge
                    elif (op == ge):
                        op = le
                    intervals.extend(rel_to_interval(op, arg.lhs()))
                else:
                    raise ValueError((str(arg) + ' does not determine real interval'))
            else:
                # Manifold inputs: open intervals and their closures.
                from sage.manifolds.differentiable.examples.real_line import OpenInterval
                from sage.manifolds.subsets.closure import ManifoldSubsetClosure
                if isinstance(arg, OpenInterval):
                    (lower, upper) = RealSet._prep(arg.lower_bound(), arg.upper_bound())
                    intervals.append(InternalRealInterval(lower, False, upper, False))
                elif (isinstance(arg, ManifoldSubsetClosure) and isinstance(arg._subset, OpenInterval)):
                    interval = arg._subset
                    (lower, upper) = RealSet._prep(interval.lower_bound(), interval.upper_bound())
                    ambient = interval.manifold()
                    (ambient_lower, ambient_upper) = RealSet._prep(ambient.lower_bound(), ambient.upper_bound())
                    # A bound is included in the closure only if it lies
                    # strictly inside the ambient interval.
                    lower_closed = (ambient_lower < lower)
                    upper_closed = (upper < ambient_upper)
                    intervals.append(InternalRealInterval(lower, lower_closed, upper, upper_closed))
                else:
                    raise ValueError((str(arg) + ' does not determine real interval'))
        union_intervals = RealSet.normalize(intervals)
        return UniqueRepresentation.__classcall__(cls, *union_intervals, normalized=True)
    def __init__(self, *intervals, normalized=True):
        """Initialize with normalized intervals and pick a fitting category."""
        category = TopologicalSpaces()
        if (len(intervals) <= 1):
            category = category.Connected()
        if all((i.is_point() for i in intervals)):
            # Finite union of points (also covers the empty set).
            category = category.Subobjects().Finite()
        else:
            category = category.Infinite()
            inf = intervals[0].lower()
            sup = intervals[(- 1)].upper()
            if (not ((len(intervals) == 1) and (inf is minus_infinity) and (sup is infinity))):
                category = category.Subobjects()
                if ((inf is not minus_infinity) and (sup is not infinity)):
                    # Bounded and all intervals closed => compact.
                    if all(((i.lower_closed() and i.upper_closed()) for i in intervals)):
                        category = category.Compact()
        Parent.__init__(self, category=category)
        self._intervals = intervals
    def __richcmp__(self, other, op):
        """Compare by the underlying interval tuples."""
        if (not isinstance(other, RealSet)):
            return NotImplemented
        return richcmp(self._intervals, other._intervals, op)
    def __iter__(self):
        return iter(self._intervals)
    def n_components(self):
        """Number of connected components (disjoint intervals)."""
        return len(self._intervals)
    def cardinality(self):
        """Number of elements: finite only when every component is a point."""
        n = ZZ(0)
        for interval in self._intervals:
            if interval.is_point():
                n += 1
            else:
                return infinity
        return n
    def is_empty(self):
        return (len(self._intervals) == 0)
    def is_universe(self):
        """Whether this set is the whole real line."""
        return (self == self.ambient())
    def get_interval(self, i):
        """Return the ``i``-th connected component."""
        return self._intervals[i]
    __getitem__ = get_interval
    def __bool__(self):
        return (not self.is_empty())
    def ambient(self):
        """The ambient space: the real line."""
        return RealSet.real_line()
    def lift(self, x):
        return x
    def retract(self, x):
        if (x not in self):
            raise ValueError(f'{x} is not an element of {self}')
        return x
    def normalize(intervals):
        """Merge a list of intervals into a sorted tuple of disjoint ones."""
        # Sweep over all endpoint events; inside the union iff >0 intervals open.
        scan = merge(*[[i._scan_lower(), i._scan_upper()] for i in intervals])
        union_intervals = tuple(RealSet._scan_to_intervals(scan, (lambda i: (i > 0))))
        return union_intervals
    def _repr_(self):
        if (self.n_components() == 0):
            return '{}'
        else:
            return ' '.join(map(repr, self._intervals))
    def _latex_(self):
        from sage.misc.latex import latex
        if (self.n_components() == 0):
            return '\\emptyset'
        else:
            return ' \\cup '.join((latex(i) for i in self._intervals))
    def _sympy_condition_(self, variable):
        """SymPy boolean condition for membership of ``variable``."""
        x = variable
        false = ((x == 0)._sympy_() & False)
        if (self.n_components() == 0):
            return false
        else:
            cond = false
            for it in self._intervals:
                cond = (cond | it._sympy_condition_(x))
            return cond
    def _giac_condition_(self, variable):
        """Giac boolean condition for membership of ``variable``."""
        x = variable
        false = 'false'
        if (self.n_components() == 0):
            return false
        return ' or '.join((it._giac_condition_(x) for it in self._intervals))
    def _prep(lower, upper=None):
        """Coerce bound(s) to RLF/infinity; with two bounds, return them sorted."""
        if (lower == minus_infinity):
            lower = minus_infinity
        elif (lower == infinity):
            lower = infinity
        else:
            lower = RLF(lower)
        if (upper is None):
            return lower
        if (upper == minus_infinity):
            upper = minus_infinity
        elif (upper == infinity):
            upper = infinity
        else:
            upper = RLF(upper)
        if ((upper is infinity) or (lower is minus_infinity)):
            return (lower, upper)
        elif ((lower is infinity) or (upper is minus_infinity)):
            return (upper, lower)
        elif (upper < lower):
            return (upper, lower)
        else:
            return (lower, upper)
    def interval(lower, upper, *, lower_closed=None, upper_closed=None, **kwds):
        """Construct an interval with explicitly given endpoint closedness."""
        if ((lower_closed is None) or (upper_closed is None)):
            raise ValueError('lower_closed and upper_closed must be explicitly given')
        (lower, upper) = RealSet._prep(lower, upper)
        return RealSet(InternalRealInterval(lower, lower_closed, upper, upper_closed), **kwds)
    def open(lower, upper, **kwds):
        """Open interval (lower, upper)."""
        (lower, upper) = RealSet._prep(lower, upper)
        return RealSet(InternalRealInterval(lower, False, upper, False), **kwds)
    def closed(lower, upper, **kwds):
        """Closed interval [lower, upper]."""
        (lower, upper) = RealSet._prep(lower, upper)
        return RealSet(InternalRealInterval(lower, True, upper, True), **kwds)
    def point(p, **kwds):
        """Single point {p}."""
        p = RealSet._prep(p)
        return RealSet(InternalRealInterval(p, True, p, True), **kwds)
    def open_closed(lower, upper, **kwds):
        """Half-open interval (lower, upper]."""
        (lower, upper) = RealSet._prep(lower, upper)
        return RealSet(InternalRealInterval(lower, False, upper, True), **kwds)
    def closed_open(lower, upper, **kwds):
        """Half-open interval [lower, upper)."""
        (lower, upper) = RealSet._prep(lower, upper)
        return RealSet(InternalRealInterval(lower, True, upper, False), **kwds)
    def unbounded_below_closed(bound, **kwds):
        """Interval (-oo, bound]."""
        bound = RealSet._prep(bound)
        return RealSet(InternalRealInterval(minus_infinity, False, bound, True), **kwds)
    def unbounded_below_open(bound, **kwds):
        """Interval (-oo, bound)."""
        bound = RealSet._prep(bound)
        return RealSet(InternalRealInterval(minus_infinity, False, RLF(bound), False), **kwds)
    def unbounded_above_closed(bound, **kwds):
        """Interval [bound, +oo)."""
        bound = RealSet._prep(bound)
        return RealSet(InternalRealInterval(RLF(bound), True, infinity, False), **kwds)
    def unbounded_above_open(bound, **kwds):
        """Interval (bound, +oo)."""
        bound = RealSet._prep(bound)
        return RealSet(InternalRealInterval(RLF(bound), False, infinity, False), **kwds)
    def real_line(**kwds):
        """The whole real line (-oo, +oo)."""
        return RealSet(InternalRealInterval(minus_infinity, False, infinity, False), **kwds)
    def _scan(self):
        """Yield endpoint events of this set's intervals, in order."""
        for i in self._intervals:
            (yield i._scan_lower())
            (yield i._scan_upper())
    def _scan_to_intervals(scan, condition):
        """Sweep over sorted endpoint events and yield the intervals where
        ``condition(indicator)`` holds (indicator = number of open intervals).
        """
        indicator = 0
        (on_x, on_epsilon) = (None, None)
        was_on = False
        for event in scan:
            ((x, epsilon), delta) = event
            indicator -= delta
            now_on = condition(indicator)
            if ((not was_on) and now_on):
                # Interval starts here.
                (on_x, on_epsilon) = (x, epsilon)
            elif (was_on and (not now_on)):
                # Interval ends here; skip degenerate (empty) spans.
                if ((on_x, on_epsilon) < (x, epsilon)):
                    lower_closed = (on_epsilon == 0)
                    upper_closed = (epsilon > 0)
                    (yield InternalRealInterval(on_x, lower_closed, x, upper_closed))
            was_on = now_on
    def union(self, *real_set_collection):
        """Union of this set with the given sets/intervals."""
        sets = [self]
        if ((len(real_set_collection) == 1) and isinstance(real_set_collection[0], RealSet)):
            sets.append(real_set_collection[0])
        elif (len(real_set_collection) == 2):
            # Two bare numbers mean an open interval, as in the constructor.
            (a, b) = real_set_collection
            try:
                a.n()
                b.n()
                sets.append(RealSet(a, b))
            except (AttributeError, ValueError, TypeError):
                sets.append(RealSet(a))
                sets.append(RealSet(b))
        else:
            sets.extend([RealSet(_) for _ in real_set_collection])
        scan = merge(*[real_set._scan() for real_set in sets])
        intervals = tuple(RealSet._scan_to_intervals(scan, (lambda i: (i > 0))))
        return RealSet(*intervals, normalized=True)
    def intersection(self, *real_set_collection):
        """Intersection of this set with the given sets/intervals."""
        sets = [self]
        if ((len(real_set_collection) == 1) and isinstance(real_set_collection[0], RealSet)):
            sets.append(real_set_collection[0])
        elif (len(real_set_collection) == 2):
            (a, b) = real_set_collection
            try:
                a.n()
                b.n()
                sets.append(RealSet(a, b))
            except (AttributeError, ValueError, TypeError):
                sets.append(RealSet(a))
                sets.append(RealSet(b))
        else:
            sets.extend([RealSet(_) for _ in real_set_collection])
        n = len(sets)
        # Inside the intersection iff *all* n sets are open at that point.
        scan = merge(*[real_set._scan() for real_set in sets])
        intervals = tuple(RealSet._scan_to_intervals(scan, (lambda i: (i == n))))
        return RealSet(*intervals, normalized=True)
    def inf(self):
        """Infimum; +oo for the empty set."""
        if (self.n_components() == 0):
            return infinity
        return self._intervals[0].lower()
    def sup(self):
        """Supremum; -oo for the empty set."""
        if (self.n_components() == 0):
            return minus_infinity
        return self._intervals[(- 1)].upper()
    def complement(self):
        """Complement within the real line."""
        return self.ambient().difference(self)
    def difference(self, *other):
        """Set difference self \\ other."""
        # Negate the other set's events so its interior cancels ours.
        remove = [(pt, (- delta)) for (pt, delta) in RealSet(*other)._scan()]
        scan = merge(self._scan(), remove)
        intervals = tuple(RealSet._scan_to_intervals(scan, (lambda i: (i > 0))))
        return RealSet(*intervals, normalized=True)
    def symmetric_difference(self, *other):
        """Symmetric difference: points in exactly one of the two sets."""
        scan = merge(self._scan(), RealSet(*other)._scan())
        intervals = tuple(RealSet._scan_to_intervals(scan, (lambda i: (i == 1))))
        return RealSet(*intervals, normalized=True)
    def contains(self, x):
        """Membership test for a real number ``x``."""
        x = RLF(x)
        for interval in self._intervals:
            if interval.contains(x):
                return True
        return False
    __contains__ = contains
    def is_subset(self, *other):
        """Whether self is contained in the union of ``other``."""
        return (RealSet(*other).intersection(self) == self)
    is_included_in = deprecated_function_alias(31927, is_subset)
    def _an_element_(self):
        """Return some element of the set (raises EmptySetError if empty)."""
        from sage.rings.infinity import AnInfinity
        if (not self._intervals):
            raise EmptySetError
        i = self._intervals[0]
        if isinstance(i.lower(), AnInfinity):
            if isinstance(i.upper(), AnInfinity):
                return ZZ.zero()
            else:
                return (i.upper() - 1)
        if isinstance(i.upper(), AnInfinity):
            return (i.lower() + 1)
        if i.lower_closed():
            return i.lower()
        if i.upper_closed():
            return i.upper()
        # Open bounded interval: take the midpoint.
        return ((i.lower() + i.upper()) / ZZ(2))
    def is_open(self):
        """Whether every component is an open interval."""
        return all((((not i.lower_closed()) and (not i.upper_closed())) for i in self._intervals))
    def is_closed(self):
        """Whether every component is closed (infinite ends count as closed)."""
        return all((((i.lower_closed() or (i.lower() is minus_infinity)) and (i.upper_closed() or (i.upper() is infinity))) for i in self._intervals))
    def closure(self):
        """Topological closure."""
        return RealSet(*[i.closure() for i in self._intervals])
    def interior(self):
        """Topological interior."""
        return RealSet(*[i.interior() for i in self._intervals])
    def boundary(self):
        """Topological boundary: the finite endpoints."""
        return RealSet(*[RealSet.point(x) for i in self._intervals for x in i.boundary_points()])
    def convex_hull(*real_set_collection):
        """Smallest interval containing all the given sets."""
        # Sentinels that any real endpoint event will beat.
        lower_scan = ((infinity, 0), 1)
        upper_scan = ((minus_infinity, 1), (- 1))
        for real_set in real_set_collection:
            s = RealSet(real_set)
            if (s.n_components() > 0):
                lower_s = s[0]._scan_lower()
                if (lower_s < lower_scan):
                    lower_scan = lower_s
                upper_s = s[(- 1)]._scan_upper()
                if (upper_s > upper_scan):
                    upper_scan = upper_s
        if (lower_scan < upper_scan):
            (lower, lower_closed) = (lower_scan[0][0], (lower_scan[0][1] == 0))
            (upper, upper_closed) = (upper_scan[0][0], (upper_scan[0][1] > 0))
            return RealSet(InternalRealInterval(lower, lower_closed, upper, upper_closed))
        else:
            return RealSet()
    def is_connected(self):
        """Whether the set has at most one... exactly one component."""
        return (self.n_components() == 1)
    def is_disjoint(self, *other):
        """Whether this set and ``other`` have empty intersection."""
        other = RealSet(*other)
        return self.are_pairwise_disjoint(self, other)
    is_disjoint_from = deprecated_function_alias(31927, is_disjoint)
    def are_pairwise_disjoint(*real_set_collection):
        """Whether no two of the given sets overlap."""
        scan = merge(*[RealSet(real_set)._scan() for real_set in real_set_collection])
        # Any point covered by >1 set is an overlap.
        overlap_generator = RealSet._scan_to_intervals(scan, (lambda i: (i > 1)))
        return (next(overlap_generator, None) is None)
    def _sage_input_(self, sib, coerced):
        """Produce Sage input syntax reconstructing this set."""
        def interval_input(i):
            (lower, upper) = (i.lower(), i.upper())
            if i.is_point():
                return sib.name('RealSet.point')(lower)
            elif ((lower == minus_infinity) and (upper == infinity)):
                return sib.name('RealSet')(sib(minus_infinity), sib(infinity))
            else:
                if i.lower_closed():
                    if i.upper_closed():
                        t = 'RealSet.closed'
                    else:
                        t = 'RealSet.closed_open'
                elif i.upper_closed():
                    t = 'RealSet.open_closed'
                else:
                    t = 'RealSet.open'
                return sib.name(t)(sib(lower), sib(upper))
        if self.is_empty():
            return sib.name('RealSet')()
        else:
            return sib.sum((interval_input(i) for i in self))
    def __mul__(self, right):
        """Scale every interval by a scalar (either operand may be the scalar)."""
        if (not isinstance(right, RealSet)):
            return RealSet(*[(e * right) for e in self])
        elif (not isinstance(self, RealSet)):
            return RealSet(*[(self * e) for e in right])
        else:
            return NotImplemented
    def __rmul__(self, other):
        return (self * other)
    def _sympy_(self):
        """Convert to a SymPy set."""
        from sympy import Reals, Union
        from sage.interfaces.sympy import sympy_init
        sympy_init()
        if self.is_universe():
            return Reals
        else:
            return Union(*[interval._sympy_() for interval in self._intervals])
class Dropout2d(_DropoutNd):
    """Channel-wise dropout for 2D feature maps.

    Delegates to ``F.dropout2d`` with the rate, training flag and inplace
    setting configured on the module.
    """
    def forward(self, input: Tensor) -> Tensor:
        """Apply 2D dropout to ``input`` and return the result."""
        result = F.dropout2d(input, self.p, self.training, self.inplace)
        return result
def raise_duplicate_arg_error(old_arg, new_arg):
    """Raise TypeError: both a legacy and a Keras 2 keyword were supplied."""
    message = (
        f'For the `{new_arg}` argument, the layer received both the legacy '
        f'keyword argument `{old_arg}` and the Keras 2 keyword argument '
        f'`{new_arg}`. Stick to the latter!'
    )
    raise TypeError(message)
def BModel2MLIR(bmodel_file):
    """Disassemble a bmodel file and lift it to the atomic MLIR dialect."""
    # Alias the import so it does not shadow this function's own name.
    from debugger.atomic_dialect import BModel2MLIR as to_mlir
    parsed = dis.BModel(bmodel_file)
    return to_mlir(parsed)
def read_test_labels(fin):
    """Parse a test-label stream into a ``{datum_id: label}`` mapping.

    Each non-blank line must contain exactly two whitespace-separated
    fields: a datum id and a label (passed through ``convert_label``).
    Lines with fewer than two fields are skipped; more than two fields,
    or an unconvertible label, raise ValueError.
    """
    label_map = {}
    for line_idx, raw in enumerate(fin):
        text = raw.decode('utf-8') if isinstance(raw, bytes) else raw
        fields = text.split()
        if len(fields) < 2:
            continue
        if len(fields) > 2:
            raise ValueError('Unexpected format at line %d: all label lines should be len==2\n%s' % (line_idx, text))
        datum_id, label = fields
        try:
            label_map[datum_id] = convert_label(label)
        except ValueError:
            raise ValueError('Unexpected test label %s at line %d\n%s' % (label, line_idx, text))
    return label_map
class TupleConstraintTag(AbstractMetric):
    """Metric checking that predicted tuples match target tuple multiplicities."""
    def evaluate_single_no_special_case(self, target: list[list], prediction: list[list]) -> float:
        """Return the fraction (rounded to 3 dp) of distinct target tuples
        whose multiplicity in the prediction equals its multiplicity in the
        target.  Rows are compared order-insensitively (sorted).
        """
        target_counts = Counter(tuple(sorted(row)) for row in target)
        pred_counts = Counter(tuple(sorted(row)) for row in prediction)
        matches = [pred_counts[key] == count for key, count in target_counts.items()]
        return round(sum(matches) / len(matches), 3)
def test_functional_operation_exceptions(functional_fx, functional_gx, functional_fxy):
    """Raising one functional to the power of another must raise TypeError."""
    with pytest.raises(TypeError):
        a = (functional_fx ** functional_gx)
def register_Ns3EpcS11SapGtpcMessage_methods(root_module, cls):
    """Register constructors and attributes for ns3::EpcS11Sap::GtpcMessage
    on the pybindgen class wrapper (generated binding code).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::EpcS11Sap::GtpcMessage const &', 'arg0')])
    cls.add_instance_attribute('teid', 'uint32_t', is_const=False)
    return
def main():
    """Train and evaluate a matrix-factorization link predictor on ogbl-ppa."""
    parser = argparse.ArgumentParser(description='OGBL-PPA (MF)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--batch_size', type=int, default=(64 * 1024))
    parser.add_argument('--lr', type=float, default=0.005)
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--eval_steps', type=int, default=1)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)
    dataset = PygLinkPropPredDataset(name='ogbl-ppa')
    split_edge = dataset.get_edge_split()
    data = dataset[0]
    # Free node embeddings serve as the matrix factorization.
    emb = torch.nn.Embedding(data.num_nodes, args.hidden_channels).to(device)
    predictor = LinkPredictor(args.hidden_channels, args.hidden_channels, 1, args.num_layers, args.dropout).to(device)
    evaluator = Evaluator(name='ogbl-ppa')
    # BUG FIX: all three loggers previously shared the key '' so the dict
    # collapsed to a single entry and `loggers[key]` raised KeyError below.
    # The standard ogbl-ppa setup evaluates Hits@10/50/100 (assumed from the
    # OGB benchmark — confirm against the `test()` helper's result keys).
    loggers = {
        'Hits@10': Logger(args.runs, args),
        'Hits@50': Logger(args.runs, args),
        'Hits@100': Logger(args.runs, args),
    }
    for run in range(args.runs):
        emb.reset_parameters()
        predictor.reset_parameters()
        optimizer = torch.optim.Adam(list(emb.parameters()) + list(predictor.parameters()), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(emb.weight, predictor, split_edge, optimizer, args.batch_size)
            if (epoch % args.eval_steps) == 0:
                results = test(emb.weight, predictor, split_edge, evaluator, args.batch_size)
                for key, result in results.items():
                    loggers[key].add_result(run, result)
                if (epoch % args.log_steps) == 0:
                    for key, result in results.items():
                        (train_hits, valid_hits, test_hits) = result
                        print(key)
                        print(f'Run: {(run + 1):02d}, Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {(100 * train_hits):.2f}%, Valid: {(100 * valid_hits):.2f}%, Test: {(100 * test_hits):.2f}%')
        for key in loggers.keys():
            print(key)
            loggers[key].print_statistics(run)
    for key in loggers.keys():
        print(key)
        loggers[key].print_statistics()
class ProcessorVariant(ABC):
    """Base class for document processor variants.

    Subclasses override ``process``; ``bulk_process`` maps it over a
    collection of documents.
    """
    # Whether this variant replaces (rather than extends) default processing.
    OVERRIDE = False
    def process(self, doc):
        """Process a single document; the base implementation is a no-op."""
        pass
    def bulk_process(self, docs):
        """Process each document in ``docs`` and return the results in order."""
        return list(map(self.process, docs))
class SharedState(Freezable):
    """Cross-process shared state for parallel star-set verification.

    Holds the network/spec under analysis plus multiprocessing-safe counters,
    queues and arrays used by worker processes; attributes are frozen after
    construction to catch accidental additions.
    """
    def __init__(self, network, spec, num_workers, start_time):
        assert isinstance(network, NeuralNetwork)
        self.network = network
        self.spec = spec
        self.num_workers = num_workers
        self.multithreaded = (num_workers > 1)
        self.start_time = start_time
        # Guards compound updates to the shared values below.
        self.mutex = multiprocessing.Lock()
        if self.multithreaded:
            self.more_work_queue = multiprocessing.Queue()
        else:
            # Single-worker runs avoid real IPC with an in-process stand-in.
            self.more_work_queue = FakeQueue()
        # Shared progress / accounting counters ('i' = int, 'f' = float).
        self.stars_in_progress = multiprocessing.Value('i', 0)
        self.heap_sizes = multiprocessing.Array('i', num_workers)
        self.num_lps = multiprocessing.Value('i', 0)
        self.num_lps_enum = multiprocessing.Value('i', 0)
        self.num_offloaded = multiprocessing.Value('i', 0)
        self.finished_stars = multiprocessing.Value('i', 0)
        self.unfinished_stars = multiprocessing.Value('i', 0)
        self.finished_approx_stars = multiprocessing.Value('i', 0)
        self.finished_work_frac = multiprocessing.Value('f', 0)
        self.incorrect_overapprox_count = multiprocessing.Value('i', 0)
        self.incorrect_overapprox_time = multiprocessing.Value('f', 0)
        # One timer slot per configured result-save checkpoint.
        num_timers = len(Settings.RESULT_SAVE_TIMERS)
        self.timer_secs = multiprocessing.Array('f', num_timers)
        self.timer_counts = multiprocessing.Array('i', num_timers)
        # Per-worker position within the network (for progress reporting).
        self.cur_layers = multiprocessing.Array('i', num_workers)
        self.cur_neurons = multiprocessing.Array('i', num_workers)
        self.finished_initial_overapprox = multiprocessing.Value('i', 0)
        # Termination flags (int-encoded booleans).
        self.had_exception = multiprocessing.Value('i', 0)
        self.had_timeout = multiprocessing.Value('i', 0)
        self.should_exit = multiprocessing.Value('i', 0)
        self.result = Result(network)
        self.freeze_attrs()
    def push_init(self, ss):
        """Seed the work queue with the initial star set."""
        Timers.tic('push_init')
        self.mutex.acquire()
        self.put_queue(ss)
        self.stars_in_progress.value = 1
        self.mutex.release()
        Timers.toc('push_init')
    def put_queue(self, ss):
        """Enqueue a star set, serializing its LP instance for IPC if needed."""
        Timers.tic('put_queue')
        if self.multithreaded:
            ss.star.lpi.serialize()
        self.more_work_queue.put(ss)
        Timers.toc('put_queue')
    def get_global_queue(self, block=True, timeout=None, skip_deserialize=False):
        """Dequeue a star set (deserializing its LP unless skipped).

        Returns None when the queue is empty (queue.Empty is swallowed).
        """
        Timers.tic('get_global_queue')
        try:
            rv = self.more_work_queue.get(block=block, timeout=timeout)
            if (self.multithreaded and (not skip_deserialize)):
                rv.star.lpi.deserialize()
        except queue.Empty:
            rv = None
        Timers.toc('get_global_queue')
        return rv
def complex_model():
    """Build a two-block SeparableConv2D/BatchNorm/ReLU Keras model.

    Returns a tuple of:
      - the model,
      - the total number of depthwise + pointwise kernel weights of the two
        separable conv layers (``model.layers[1]`` and ``model.layers[4]``),
      - the output size computed from the second batch-norm layer's shape.
    """
    random_uniform = initializers.random_uniform(0, 1)
    inputs = Input(shape=(224, 224, 3))
    x = SeparableConv2D(10, 6, padding='same', name='sep_conv2d1')(inputs)
    x = BatchNormalization(gamma_initializer='random_normal', beta_initializer='random_normal', moving_mean_initializer='random_normal', moving_variance_initializer=random_uniform, name='bn1')(x)
    x = ReLU()(x)
    x = SeparableConv2D(20, 12, padding='same', name='sep_conv2d2')(x)
    x = BatchNormalization(gamma_initializer='random_normal', beta_initializer='random_normal', moving_mean_initializer='random_normal', moving_variance_initializer=random_uniform, name='bn2')(x)
    outputs = ReLU()(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    # Sum the flattened depthwise and pointwise kernel sizes of both
    # separable conv layers; layer indices 1 and 4 are the conv layers.
    return (model, (((getattr(model.layers[1], DEPTHWISE_KERNEL).numpy().flatten().shape[0] + getattr(model.layers[1], POINTWISE_KERNEL).numpy().flatten().shape[0]) + getattr(model.layers[4], DEPTHWISE_KERNEL).numpy().flatten().shape[0]) + getattr(model.layers[4], POINTWISE_KERNEL).numpy().flatten().shape[0]), compute_output_size(model.layers[4].output_shape))
def main(argv=None):
    """Train the VGG16-based FCN segmentation network (TF1 graph mode).

    Builds the graph, restores the latest checkpoint if present, then runs
    the training loop: saves every 500 iterations, logs training loss every
    10, and (optionally) computes validation loss every 2000.
    """
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='GTLabel')
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, NUM_CLASSES, keep_prob)
    Loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(GTLabel, squeeze_dims=[3]), logits=Net.Prob, name='Loss'))
    trainable_var = tf.trainable_variables()
    train_op = train(Loss, trainable_var)
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, GTLabelDir=Train_Label_Dir, BatchSize=Batch_Size)
    if UseValidationSet:
        ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, GTLabelDir=Valid_Labels_Dir, BatchSize=Batch_Size)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    # Start fresh loss logs for this run (the with-block closes the files;
    # the original's extra f.close() calls were redundant).
    with open(TrainLossTxtFile, 'w') as f:
        f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    if UseValidationSet:
        with open(ValidLossTxtFile, 'w') as f:
            f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    for itr in range(MAX_ITERATION):
        (Images, GTLabels) = TrainReader.ReadAndAugmentNextBatch()
        feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict)
        if ((itr % 500) == 0) and (itr > 0):
            print('Saving Model to file in ' + logs_dir)
            saver.save(sess, (logs_dir + 'model.ckpt'), itr)
        if (itr % 10) == 0:
            # Report training loss on the current batch without dropout.
            feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1}
            TLoss = sess.run(Loss, feed_dict=feed_dict)
            print('Step ' + str(itr) + ' Train Loss=' + str(TLoss))
            with open(TrainLossTxtFile, 'a') as f:
                f.write('\n' + str(itr) + '\t' + str(TLoss))
        if UseValidationSet and ((itr % 2000) == 0):
            SumLoss = np.float64(0.0)
            # BUG FIX: `np.int` was removed in NumPy 1.24; use the builtin.
            NBatches = int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            print('Calculating Validation on ' + str(ValidReader.NumFiles) + ' Images')
            for i in range(NBatches):
                (Images, GTLabels) = ValidReader.ReadNextBatchClean()
                feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1.0}
                TLoss = sess.run(Loss, feed_dict=feed_dict)
                SumLoss += TLoss
            # BUG FIX: the original also incremented NBatches inside the loop,
            # doubling the divisor and halving the reported mean loss.
            SumLoss /= NBatches
            print('Validation Loss: ' + str(SumLoss))
            with open(ValidLossTxtFile, 'a') as f:
                f.write('\n' + str(itr) + '\t' + str(SumLoss))
class Bottle3d(nn.Module):
    """3D convolutional encoder that downsamples a volumetric feature grid
    and flattens it to a per-batch feature vector.
    """
    def __init__(self, in_channel, pred_dim, chans=64):
        super(Bottle3d, self).__init__()
        conv3d = []
        # Channel widths double each stage: chans, 2*chans, 4*chans.
        self.out_chans = [chans, (2 * chans), (4 * chans)]
        n_layers = len(self.out_chans)
        for i in list(range(n_layers)):
            if (i == 0):
                in_dim = in_channel
            else:
                in_dim = self.out_chans[(i - 1)]
            out_dim = self.out_chans[i]
            # Stride-2 conv halves each spatial dimension per stage.
            conv3d.append(nn.Sequential(nn.Conv3d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=0), nn.LeakyReLU(), nn.BatchNorm3d(num_features=out_dim)))
        self.conv3d = nn.ModuleList(conv3d)
        hidden_dim = 1024
        # NOTE(review): `linear_layers` is built here (expecting a 2x2x2
        # spatial output) but never applied in `forward`, which returns the
        # flattened conv features directly — possibly a missing
        # `self.linear_layers(feat)` call; confirm the intended behavior.
        self.linear_layers = nn.Sequential(nn.Linear((((self.out_chans[(- 1)] * 2) * 2) * 2), hidden_dim), nn.LeakyReLU(), nn.Linear(hidden_dim, pred_dim))
    def forward(self, feat):
        """Run the conv stages and return the flattened (B, -1) features."""
        (B, C, Z, Y, X) = list(feat.shape)
        for conv3d_layer in self.conv3d:
            feat = conv3d_layer(feat)
        feat = feat.reshape(B, (- 1))
        return feat
# NOTE(review): this line looks like a mangled decorator — presumably
# "@test_utils.test(...)" in the original source; as written it is a bare call.
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac], debug=True)
def test_print_matrix():
    """Exercise printing Taichi matrix/vector field values (with arithmetic)
    from device code; ti.sync() flushes the device-side print buffer."""
    x = ti.Matrix.field(2, 3, dtype=ti.f32, shape=())
    y = ti.Vector.field(3, dtype=ti.f32, shape=3)

    # presumably decorated with @ti.kernel in the original — TODO confirm
    def func(k: ti.f32):
        x[None][(0, 0)] = (- 1.0)
        y[2] += 1.0
        print('hello', x[None], 'world!')
        print((y[2] * k), (x[None] / k), y[2])
    func(233.3)
    ti.sync()
class GNNStackStage(nn.Module):
    """Stack of ``num_layers`` GNN layers applying the skip scheme selected by
    ``cfg.gnn.stage_type`` ('skipsum', 'skipconcat', or plain stacking), with
    optional final L2 normalization (``cfg.gnn.l2norm``)."""

    def __init__(self, dim_in, dim_out, num_layers):
        super(GNNStackStage, self).__init__()
        self.num_layers = num_layers
        for idx in range(num_layers):
            if cfg.gnn.stage_type == 'skipconcat':
                # skipconcat: each layer's input grows by dim_out per prior layer.
                layer_in = dim_in if idx == 0 else dim_in + idx * dim_out
            else:
                layer_in = dim_in if idx == 0 else dim_out
            self.add_module('layer{}'.format(idx), GNNLayer(layer_in, dim_out))

    def forward(self, batch):
        for idx, layer in enumerate(self.children()):
            residual = batch.x
            batch = layer(batch)
            if cfg.gnn.stage_type == 'skipsum':
                batch.x = residual + batch.x
            elif cfg.gnn.stage_type == 'skipconcat' and idx < self.num_layers - 1:
                # Concatenate the pre-layer features (skipped on the last layer
                # so the stage output keeps a fixed width).
                batch.x = torch.cat([residual, batch.x], dim=1)
        if cfg.gnn.l2norm:
            batch.x = F.normalize(batch.x, p=2, dim=-1)
        return batch
def cERGM2_subgraph(G):
    """Return the subgraph of ``G`` induced by the nodes whose integer 'term'
    attribute equals the maximum term, plus their 1-hop neighbours, copying the
    'term' attribute onto the result (default -1 for unattributed nodes).

    BUG FIX: replaced the Python-2-only ``dict.has_key`` and ``iteritems`` with
    ``in`` / ``items()``, which work on both Python 2 and 3.
    """
    termdict = dict()
    maxterm = max(G.GetIntAttrDatN(i, 'term') for i in G.Nodes())
    maxterm_nodes = [node.GetId() for node in G.Nodes()
                     if G.GetIntAttrDatN(node, 'term') == maxterm]
    nodes = set(maxterm_nodes)
    for node_id in maxterm_nodes:
        termdict[node_id] = maxterm
    newNodes = set(nodes)
    # Iterate a snapshot: newNodes is extended inside the loop.
    for node_id in set(newNodes):
        neighbours = snap.TIntV()
        snap.GetNodesAtHop(G, node_id, 1, neighbours, True)
        newNeighbours = set(neighbours) - nodes
        for nbr in newNeighbours:
            if nbr not in termdict:
                termdict[nbr] = G.GetIntAttrDatN(nbr, 'term')
        newNodes.update(newNeighbours)
    nodes.update(newNodes)
    NodeVec = snap.TIntV()
    for node_id in nodes:
        NodeVec.Add(node_id)
    subgraphG = snap.GetSubGraph(G, NodeVec)
    subgraphG.AddIntAttrN('term', -1)
    for nodeid, term in termdict.items():
        subgraphG.AddIntAttrDatN(nodeid, term, 'term')
    return subgraphG
def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
    """Export a traced PyTorch model to an Apple CoreML ``.mlmodel`` file.

    Returns the converted CoreML model, or None if any step failed (failures
    are reported, not raised).
    """
    ct_model = None
    try:
        check_requirements(('coremltools',))
        import coremltools as ct
        print(f'''
{prefix} starting export with coremltools {ct.__version__}...''')
        f = file.with_suffix('.mlmodel')
        # NOTE(review): export is performed with the model in train() mode —
        # confirm this is intentional.
        model.train()
        traced = torch.jit.trace(model, im, strict=False)
        # Normalize pixel values from [0, 255] to [0, 1] at the CoreML input.
        image_input = ct.ImageType('image', shape=im.shape, scale=(1 / 255.0), bias=[0, 0, 0])
        ct_model = ct.convert(traced, inputs=[image_input])
        ct_model.save(f)
        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        print(f'''
{prefix} export failure: {e}''')
    return ct_model
.experimental
def test_predict_empty_log(log):
    """Smoke test: fitting NeuroMF on ``log`` and predicting on an empty log
    (``log.limit(0)``) must complete without raising."""
    model = NeuroMF()
    model.fit(log)
    model.predict(log.limit(0), 1)
def save_configuration(config, binding, vars, output_file):
    """Collect the tuned layout / algorithm / special-dimension choices into a
    single dict and pickle it to ``output_file``.

    Side effect: injects the derived 'BKQV' and 'KKQQVV' entries into
    ``binding`` (prefixing the 'BK' / 'QQ' layouts with 'Q').
    """
    layouts = {
        'input': ('X', binding['X'].upper()),
        'output': ('SB2', binding['SB2'].upper()),
        'special_dims': {},
        'algorithms': {},
    }
    for opname, op in vars.unmerged_ops.items():
        if not op.specials:
            continue
        if 'Implementation' in op.specials:
            # Record the chosen algorithm under the op's first output variable.
            output_var = op.outputs[0]
            algo = config[opname]['Implementation']
            algo = 'DEFAULT' if 'default' in algo else 'ALGO' + algo[2:]
            layouts['algorithms'][output_var.upper()] = algo
        else:
            for special in op.specials:
                # softmax entries are abbreviated to the 'SM_' prefix.
                stem = 'SM_' if op.name == 'softmax' else op.name + '_'
                layouts['special_dims'][(stem + special).upper()] = config[opname][special].upper()
    params_vars = ['WKQV', 'BKQV', 'WO', 'BO', 'S1', 'B1', 'LINB1', 'LINW1', 'S2', 'B2', 'LINB2', 'LINW2']
    binding['BKQV'] = 'Q' + binding['BK']
    layouts['params'] = [(var, binding[var].upper()) for var in params_vars]
    forward_vars = ['KKQQVV', 'WKKWQQWVV', 'BETA', 'ALPHA', 'ATTN_DROP_MASK', 'ATTN_DROP', 'GAMMA', 'ATT', 'DROP1MASK', 'SB1', 'SB1_LINW1', 'DROP2', 'LIN1', 'DROP2_LINW2', 'LIN2', 'LN2', 'LN2STD', 'LN2DIFF', 'DROP2MASK', 'DROP3MASK', 'LN1', 'LN1STD', 'LN1DIFF']
    binding['KKQQVV'] = 'Q' + binding['QQ']
    layouts['forward_interm'] = [(var, binding[var].upper()) for var in forward_vars]
    backward_vars = ['DLN2', 'DRESID2', 'DLIN2', 'DDROP2', 'DLIN1', 'DLIN1_LINW1', 'DLN1', 'DRESID1', 'DATT', 'DXATT', 'DGAMMA', 'DATTN_DROP', 'DBETA', 'DKKQQVV']
    layouts['backward_interm'] = [(var, binding[var].upper()) for var in backward_vars]
    with open(output_file, 'wb') as fh:
        pickle.dump(layouts, fh)
# NOTE(review): "_dispatch" looks like a mangled "@_dispatch" decorator line
# (uarray multimethod registration) — confirm against the upstream source.
_dispatch
def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None):
    """Dispatch stub for the N-D real FFT: declares that dispatch is keyed on
    ``x`` as an ``np.ndarray``; a registered backend supplies the actual
    implementation."""
    return (Dispatchable(x, np.ndarray),)
def warp_shfl_up_i32(val: template()):
    """Warp-level inclusive prefix sum of ``val`` across the 32 lanes of a
    warp, using shuffle-up intrinsics.

    The five rounds (offsets 1, 2, 4, 8, 16) are a manually unrolled
    Hillis-Steele scan; each round pulls the partial sum from ``offset_j``
    lanes below and adds it, guarded so the lowest lanes keep their value.
    """
    global_tid = block.global_thread_idx()
    WARP_SZ = 32  # warp width
    lane_id = (global_tid % WARP_SZ)
    offset_j = 1
    n = warp.shfl_up_i32(warp.active_mask(), val, offset_j)
    if (lane_id >= offset_j):
        val += n
    offset_j = 2
    n = warp.shfl_up_i32(warp.active_mask(), val, offset_j)
    if (lane_id >= offset_j):
        val += n
    offset_j = 4
    n = warp.shfl_up_i32(warp.active_mask(), val, offset_j)
    if (lane_id >= offset_j):
        val += n
    offset_j = 8
    n = warp.shfl_up_i32(warp.active_mask(), val, offset_j)
    if (lane_id >= offset_j):
        val += n
    offset_j = 16
    n = warp.shfl_up_i32(warp.active_mask(), val, offset_j)
    if (lane_id >= offset_j):
        val += n
    return val
def run_training(model, batcher, sess_context_manager, sv, summary_writer):
    """Run training steps forever, logging per-step timing and loss and
    flushing summaries every 100 global steps.

    Raises if the loss becomes non-finite. Optionally wraps the session in the
    TF CLI debugger when ``FLAGS.debug`` is set.
    """
    tf.logging.info('starting run_training')
    with sess_context_manager as sess:
        if FLAGS.debug:
            sess = tf_debug.LocalCLIDebugWrapperSession(sess)
            sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
        while True:
            batch = batcher.next_batch()
            tf.logging.info('running training step...')
            start = time.time()
            results = model.run_train_step(sess, batch)
            tf.logging.info('seconds for training step: %.3f', time.time() - start)
            loss = results['loss']
            tf.logging.info('loss: %f', loss)
            if not np.isfinite(loss):
                raise Exception('Loss is not finite. Stopping.')
            if FLAGS.coverage:
                tf.logging.info('coverage_loss: %f', results['coverage_loss'])
            train_step = results['global_step']
            summary_writer.add_summary(results['summaries'], train_step)
            if train_step % 100 == 0:
                summary_writer.flush()
def preprocess_for_lm_mappable(e: Dict[(str, Any)], tokenizer, header: str=DEFAULT_CONVERSATION_HEADER):
    """Turn one conversation example ``e`` into LM training features:
    tokenized input ids, masked labels, and its decoded audio encoding."""
    turns = e['conversations']
    conversation = sentences_to_formatted_conversation(header, turns)
    input_ids = _tokenize_fn([conversation], tokenizer)['input_ids'][0]
    labels = copy.deepcopy(input_ids)
    # Per-segment token lengths (header + each turn) drive the label masking.
    segments = [header] + [turn['value'] for turn in turns]
    segment_lens = _tokenize_fn(segments, tokenizer)['input_ids_lens']
    speakers = [turn['from'] for turn in turns]
    _mask_targets(labels, segment_lens, speakers)
    audio_encoding = preprocess_encodings(e['audio_encoding'], e['audio_encoding_shape'])
    return dict(input_ids=input_ids, labels=labels, audio_encoding=audio_encoding, example_id=e['id'])
def test_ann_assign_supported_type():
    """The annotated-assignment helper must yield a uint16-typed result."""
    result = ann_assign_supported_type()
    assert result.dtype == np.uint16
class LmdbBackend(BaseStorageBackend):
    """Storage backend reading values from one or more LMDB environments, each
    addressed by a client key."""

    def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs):
        try:
            import lmdb
        except ImportError:
            raise ImportError('Please install lmdb to enable LmdbBackend.')
        # Normalize both arguments to parallel lists.
        if isinstance(client_keys, str):
            client_keys = [client_keys]
        if isinstance(db_paths, list):
            self.db_paths = [str(p) for p in db_paths]
        elif isinstance(db_paths, str):
            self.db_paths = [str(db_paths)]
        assert len(client_keys) == len(self.db_paths), f'client_keys and db_paths should have the same length, but received {len(client_keys)} and {len(self.db_paths)}.'
        # One open LMDB environment per client key.
        self._client = {}
        for key, path in zip(client_keys, self.db_paths):
            self._client[key] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs)

    def get(self, filepath, client_key):
        """Return the raw bytes stored under ``filepath`` in the environment
        registered as ``client_key`` (None if the key is absent)."""
        filepath = str(filepath)
        assert client_key in self._client, f'client_key {client_key} is not in lmdb clients.'
        client = self._client[client_key]
        with client.begin(write=False) as txn:
            return txn.get(filepath.encode('ascii'))

    def get_text(self, filepath):
        # Text access is intentionally unsupported for LMDB storage.
        raise NotImplementedError
def TemperatureCalibration(*args, **kwargs):
    """Deprecated top-level alias: warns, then forwards all arguments to
    ``calibration.TemperatureCalibration``."""
    _top_level_deprecation_warning('TemperatureCalibration', 'calibration')
    delegate = calibration.TemperatureCalibration
    return delegate(*args, **kwargs)
def _required_threejs_version():
    """Return the pinned three.js version string shipped with Sage
    (read from SAGE_EXTCODE/threejs/threejs-version.txt)."""
    import os
    import sage.env
    version_path = os.path.join(sage.env.SAGE_EXTCODE, 'threejs', 'threejs-version.txt')
    with open(version_path) as fh:
        return fh.read().strip()
def preactresnet18(num_classes=10, dropout=False, stride=1, parallel=False):
    """Build a PreActResNet-18 ([2, 2, 2, 2] blocks, base width 64).

    NOTE(review): the ``dropout`` and ``parallel`` parameters are accepted but
    never used or forwarded — confirm whether PreActResNet should receive them.
    """
    return PreActResNet(PreActBlock, [2, 2, 2, 2], 64, num_classes, stride=stride)
def finder_for_path(path):
    """Return a resource finder for ``path``, or None when no finder is
    registered for the path's loader type."""
    # Called for its side effect: pkgutil.get_importer populates
    # sys.path_importer_cache for this path entry.
    pkgutil.get_importer(path)
    loader = sys.path_importer_cache.get(path)
    finder = _finder_registry.get(type(loader))
    if not finder:
        return None
    # Fake a package module rooted at ``path`` so the finder can operate on it.
    module = _dummy_module
    module.__file__ = os.path.join(path, '')
    module.__loader__ = loader
    return finder(module)
def get_valid_stats(args, trainer, stats, saver):
    """Augment validation ``stats`` with the current update count and, when a
    running best exists on ``saver.save_checkpoint``, the best value of the
    configured checkpoint metric (max or min per the args)."""
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(saver.save_checkpoint, 'best'):
        metric = args.best_checkpoint_metric
        chooser = max if args.maximize_best_checkpoint_metric else min
        stats['best_{0}'.format(metric)] = chooser(saver.save_checkpoint.best, stats[metric])
    return stats
def register_Ns3EpcS1apSap_methods(root_module, cls):
    """Register the ns3::EpcS1apSap constructors on the pybindgen wrapper."""
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param('ns3::EpcS1apSap const &', 'arg0')])  # copy constructor
    return
def get_mIoU(fakes, names, model, device, table_path='datasets/table.txt', data_dir='database/cityscapes', batch_size=1, num_workers=8, num_classes=19, use_tqdm=True):
    """Concatenate the generated image tensors, convert them to images, and
    return their segmentation score from ``test`` as a float."""
    images = util.tensor2im(torch.cat(fakes, dim=0))
    score = test(images, names, model, device,
                 table_path=table_path, data_dir=data_dir,
                 batch_size=batch_size, num_workers=num_workers,
                 num_classes=num_classes, use_tqdm=use_tqdm)
    return float(score)
class Adam(Optimizer):
    """Multi-tensor Adam/AMSGrad built on the ``torch._foreach_*`` fused ops.

    NOTE(review): relies on private APIs (e.g. ``torch._foreach_div_scalar_list_``)
    whose names have changed across PyTorch releases — confirm against the
    targeted torch version.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        # Validate hyper-parameters before handing them to the base Optimizer.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints created before the amsgrad option may lack the key.
            group.setdefault('amsgrad', False)

    # NOTE(review): "_grad()" looks like a mangled "@torch.no_grad()"
    # decorator — as written it is a bare call; confirm against upstream.
    _grad()
    def step(self, closure=None):
        """Perform a single optimization step; re-evaluates the model via
        ``closure`` (under grad) when one is given and returns its loss."""
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            amsgrad = group['amsgrad']
            # Gather per-parameter tensors so each update is one foreach call.
            grads = []
            states = []
            exp_avg = []
            exp_avg_sq = []
            max_exp_avg_sq = []
            params_with_grad = []
            for p in group['params']:
                if (p.grad is not None):
                    if p.grad.is_sparse:
                        raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                    params_with_grad.append(p)
                    grads.append(p.grad)
            for p in params_with_grad:
                state = self.state[p]
                # Lazy state initialization on first step for this parameter.
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                exp_avg.append(state['exp_avg'])
                exp_avg_sq.append(state['exp_avg_sq'])
                if amsgrad:
                    max_exp_avg_sq.append(state['max_exp_avg_sq'])
                state['step'] += 1
                states.append(state)
            (beta1, beta2) = group['betas']
            # Per-parameter bias corrections (steps may differ across params).
            bias_correction1 = [(1 - (beta1 ** state['step'])) for state in states]
            bias_correction2 = [(1 - (beta2 ** state['step'])) for state in states]
            if (group['weight_decay'] != 0):
                # Decoupled-from-loop L2: grad = grad + wd * param.
                grads = torch._foreach_add(grads, params_with_grad, alpha=group['weight_decay'])
            # First moment: exp_avg = beta1 * exp_avg + (1 - beta1) * grad.
            torch._foreach_mul_(exp_avg, beta1)
            torch._foreach_add_(exp_avg, grads, alpha=(1 - beta1))
            # Second moment: exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * grad^2.
            torch._foreach_mul_(exp_avg_sq, beta2)
            torch._foreach_addcmul_(exp_avg_sq, grads, grads, (1 - beta2))
            if amsgrad:
                # Keep the elementwise max of all second-moment estimates.
                [torch.max(a, b, out=a) for (a, b) in zip(max_exp_avg_sq, exp_avg_sq)]
                max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sq)
                bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
                torch._foreach_div_scalar_list_(max_exp_avg_sq_sqrt, bias_correction_sqrt)
                denom = torch._foreach_add(max_exp_avg_sq_sqrt, group['eps'])
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sq)
                bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
                torch._foreach_div_scalar_list_(exp_avg_sq_sqrt, bias_correction_sqrt)
                denom = torch._foreach_add(exp_avg_sq_sqrt, group['eps'])
            step_size = [(group['lr'] / bc) for bc in bias_correction1]
            # param -= step_size * exp_avg / denom
            for i in range(len(step_size)):
                params_with_grad[i].addcdiv_(exp_avg[i], denom[i], value=(- step_size[i]))
        return loss
class MultipleOutputsMultipleTensorsNet(torch.nn.Module):
    """Toy network returning five tensors (the input plus four branches);
    used to exercise multi-output model handling."""

    def __init__(self):
        super(MultipleOutputsMultipleTensorsNet, self).__init__()
        # Keep construction order: it fixes the RNG stream for weight init.
        self.conv1 = torch.nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.linear = torch.nn.Linear(3 * 32 * 32, 3)
        self.conv2 = torch.nn.Conv2d(3, 3, kernel_size=1, stride=1)

    def forward(self, x):
        conv_branch = self.conv1(x)
        linear_branch = self.linear(torch.flatten(x, 1))
        relu_branch = torch.relu(x)
        second_conv = torch.relu(self.conv2(x))
        return (x, conv_branch, linear_branch, relu_branch, second_conv)
def register_Ns3LteFfrAlgorithm_methods(root_module, cls):
    """Register the ns3::LteFfrAlgorithm API (constructors, public accessors,
    and protected pure-virtual Do* hooks) on the pybindgen class wrapper."""
    # Constructors.
    cls.add_constructor([param('ns3::LteFfrAlgorithm const &', 'arg0')])
    cls.add_constructor([])
    # Public accessors / SAP getters-setters.
    cls.add_method('GetDlBandwidth', 'uint8_t', [], is_const=True)
    cls.add_method('GetFrCellTypeId', 'uint8_t', [], is_const=True)
    cls.add_method('GetLteFfrRrcSapProvider', 'ns3::LteFfrRrcSapProvider *', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetLteFfrSapProvider', 'ns3::LteFfrSapProvider *', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetUlBandwidth', 'uint8_t', [], is_const=True)
    cls.add_method('SetDlBandwidth', 'void', [param('uint8_t', 'bw')])
    cls.add_method('SetFrCellTypeId', 'void', [param('uint8_t', 'cellTypeId')])
    cls.add_method('SetLteFfrRrcSapUser', 'void', [param('ns3::LteFfrRrcSapUser *', 's')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetLteFfrSapUser', 'void', [param('ns3::LteFfrSapUser *', 's')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetUlBandwidth', 'void', [param('uint8_t', 'bw')])
    # Protected virtual hooks (mostly pure virtual Do* callbacks).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoGetAvailableDlRbg', 'std::vector< bool >', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoGetAvailableUlRbg', 'std::vector< bool >', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoGetMinContinuousUlBandwidth', 'uint8_t', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoGetTpc', 'uint8_t', [param('uint16_t', 'rnti')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoIsDlRbgAvailableForUe', 'bool', [param('int', 'rbId'), param('uint16_t', 'rnti')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoIsUlRbgAvailableForUe', 'bool', [param('int', 'rbId'), param('uint16_t', 'rnti')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoRecvLoadInformation', 'void', [param('ns3::EpcX2Sap::LoadInformationParams', 'params')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoReportDlCqiInfo', 'void', [param('ns3::FfMacSchedSapProvider::SchedDlCqiInfoReqParameters const &', 'params')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoReportUeMeas', 'void', [param('uint16_t', 'rnti'), param('ns3::LteRrcSap::MeasResults', 'measResults')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    # DoReportUlCqiInfo is overloaded on the parameter type.
    cls.add_method('DoReportUlCqiInfo', 'void', [param('ns3::FfMacSchedSapProvider::SchedUlCqiInfoReqParameters const &', 'params')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoReportUlCqiInfo', 'void', [param('std::map< unsigned short, std::vector< double > >', 'ulCqiMap')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    cls.add_method('DoSetBandwidth', 'void', [param('uint8_t', 'ulBandwidth'), param('uint8_t', 'dlBandwidth')], visibility='protected', is_virtual=True)
    cls.add_method('DoSetCellId', 'void', [param('uint16_t', 'cellId')], visibility='protected', is_virtual=True)
    cls.add_method('GetRbgSize', 'int', [param('int', 'dlbandwidth')], visibility='protected')
    cls.add_method('Reconfigure', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def get_session_items(session):
    """Flatten the 'retrieved_items' lists of every step in ``session`` into a
    single list, skipping steps without that key."""
    return [item
            for step in session
            if 'retrieved_items' in step
            for item in step['retrieved_items']]
class DownstreamExpert(nn.Module):
    """IEMOCAP emotion-recognition downstream head for an upstream feature
    extractor: projector + classifier, with per-fold train/dev/test loaders,
    cross-entropy training and best-dev checkpoint tracking.

    NOTE(review): the model class and the dataloader getter are resolved via
    ``eval`` on config strings — only safe for trusted configuration files.
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super(DownstreamExpert, self).__init__()
        self.upstream_dim = upstream_dim
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']
        DATA_ROOT = self.datarc['root']
        meta_data = self.datarc['meta_data']
        # Fold selection: explicit config wins, then CLI variant, then fold1.
        self.fold = (self.datarc.get('test_fold') or kwargs.get('downstream_variant'))
        if (self.fold is None):
            self.fold = 'fold1'
        print(f'[Expert] - using the testing fold: "{self.fold}". Ps. Use -o config.downstream_expert.datarc.test_fold=fold2 to change test_fold in config.')
        # Meta-data files live under SessionN directories matching the fold.
        train_path = os.path.join(meta_data, self.fold.replace('fold', 'Session'), 'train_meta_data.json')
        print(f'[Expert] - Training path: {train_path}')
        test_path = os.path.join(meta_data, self.fold.replace('fold', 'Session'), 'test_meta_data.json')
        print(f'[Expert] - Testing path: {test_path}')
        dataset = IEMOCAPDataset(DATA_ROOT, train_path, self.datarc['pre_load'])
        # Deterministic train/dev split (seeded) by valid_ratio.
        trainlen = int(((1 - self.datarc['valid_ratio']) * len(dataset)))
        lengths = [trainlen, (len(dataset) - trainlen)]
        torch.manual_seed(0)
        (self.train_dataset, self.dev_dataset) = random_split(dataset, lengths)
        self.test_dataset = IEMOCAPDataset(DATA_ROOT, test_path, self.datarc['pre_load'])
        # Resolve the classifier class named in the config.
        model_cls = eval(self.modelrc['select'])
        model_conf = self.modelrc.get(self.modelrc['select'], {})
        self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = model_cls(input_dim=self.modelrc['projector_dim'], output_dim=dataset.class_num, **model_conf)
        self.objective = nn.CrossEntropyLoss()
        self.expdir = expdir
        # Buffer so the best dev score travels with checkpoints.
        self.register_buffer('best_score', torch.zeros(1))

    def get_downstream_name(self):
        """Name this task instance after its fold, e.g. 'emotion1'."""
        return self.fold.replace('fold', 'emotion')

    def _get_train_dataloader(self, dataset):
        # Use a DistributedSampler when torch.distributed is initialized.
        sampler = (DistributedSampler(dataset) if is_initialized() else None)
        return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=collate_fn)

    def _get_eval_dataloader(self, dataset):
        return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=collate_fn)

    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    def get_dataloader(self, mode):
        # Dynamic dispatch to get_{train,dev,test}_dataloader.
        return eval(f'self.get_{mode}_dataloader')()

    def forward(self, mode, features, labels, filenames, records, **kwargs):
        """Run one batch: project, classify, compute the loss, and append
        accuracy/loss/prediction bookkeeping to ``records``."""
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        (predicted, _) = self.model(features, features_len)
        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)
        predicted_classid = predicted.max(dim=(- 1)).indices
        records['acc'] += (predicted_classid == labels).view((- 1)).cpu().float().tolist()
        records['loss'].append(loss.item())
        records['filename'] += filenames
        records['predict'] += [self.test_dataset.idx2emotion[idx] for idx in predicted_classid.cpu().tolist()]
        records['truth'] += [self.test_dataset.idx2emotion[idx] for idx in labels.cpu().tolist()]
        return loss

    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Log averaged acc/loss, track the best dev score (returning the
        checkpoint names to save), and dump predict/truth files for dev/test."""
        save_names = []
        for key in ['acc', 'loss']:
            values = records[key]
            average = torch.FloatTensor(values).mean().item()
            logger.add_scalar(f'emotion-{self.fold}/{mode}-{key}', average, global_step=global_step)
            with open((Path(self.expdir) / 'log.log'), 'a') as f:
                if (key == 'acc'):
                    print(f'{mode} {key}: {average}')
                    f.write(f'''{mode} at step {global_step}: {average}
''')
                    if ((mode == 'dev') and (average > self.best_score)):
                        self.best_score = (torch.ones(1) * average)
                        f.write(f'''New best on {mode} at step {global_step}: {average}
''')
                        save_names.append(f'{mode}-best.ckpt')
        if (mode in ['dev', 'test']):
            with open((Path(self.expdir) / f'{mode}_{self.fold}_predict.txt'), 'w') as file:
                line = [f'''{f} {e}
''' for (f, e) in zip(records['filename'], records['predict'])]
                file.writelines(line)
            with open((Path(self.expdir) / f'{mode}_{self.fold}_truth.txt'), 'w') as file:
                line = [f'''{f} {e}
''' for (f, e) in zip(records['filename'], records['truth'])]
                file.writelines(line)
        return save_names
def load_vox_header(filename):
    """Read only the header of a voxel-grid file: dims (3 x uint32),
    resolution (float32), and the 4x4 grid-to-world matrix (16 x float32).

    Returns a ``Vox`` with the header fields populated. ``.df``/``.sdf``
    files store the matrix in C (row-major) order, others in Fortran order.
    """
    assert os.path.isfile(filename), ('file not found: %s' % filename)
    f_or_c = 'C' if filename.endswith(('.df', '.sdf')) else 'F'
    s = Vox()
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(filename, 'rb') as fin:
        s.dims[0] = struct.unpack('I', fin.read(4))[0]
        s.dims[1] = struct.unpack('I', fin.read(4))[0]
        s.dims[2] = struct.unpack('I', fin.read(4))[0]
        s.res = struct.unpack('f', fin.read(4))[0]
        s.grid2world = struct.unpack('f' * 16, fin.read(16 * 4))
    s.grid2world = np.asarray(s.grid2world, dtype=np.float32).reshape([4, 4], order=f_or_c)
    return s
class LabelParameterization(nn.Module):
    """Per-sample soft-label parameterization: label logits are ``s*s - t*t``
    with an exponential-moving-average history (0.3 new / 0.7 old)."""

    def __init__(self, n_samples, n_class, init='gaussian', mean=0.0, std=0.0001):
        super(LabelParameterization, self).__init__()
        self.n_samples = n_samples
        self.n_class = n_class
        self.init = init
        self.s = nn.Parameter(torch.empty(n_samples, n_class, dtype=torch.float32))
        self.t = nn.Parameter(torch.empty(n_samples, n_class, dtype=torch.float32))
        # EMA buffer of past parameterized labels (not a learnable parameter).
        # NOTE(review): hard-coded .cuda() — requires a GPU at construction.
        self.history = torch.zeros(n_samples, n_class, dtype=torch.float32).cuda()
        self.init_param(mean=mean, std=std)

    def init_param(self, mean=0.0, std=0.0001):
        """Initialize both parameter tensors per the configured scheme."""
        if (self.init == 'gaussian'):
            torch.nn.init.normal_(self.s, mean=mean, std=std)
            # BUG FIX: the original initialized self.s twice here, leaving
            # self.t as uninitialized (torch.empty) memory.
            torch.nn.init.normal_(self.t, mean=mean, std=std)
        elif (self.init == 'zero'):
            torch.nn.init.constant_(self.s, 0)
            torch.nn.init.constant_(self.t, 0)
        else:
            raise TypeError('Label not initialized.')

    def compute_loss(self):
        """Squared spectral (2-)norm of the parameterized label matrix."""
        param_y = ((self.s * self.s) - (self.t * self.t))
        return (torch.linalg.norm(param_y, ord=2) ** 2)

    def forward(self, feature, idx):
        """Return (feature + EMA-smoothed parameterized labels, feature) for
        the samples selected by ``idx``; updates the history buffer."""
        y = feature
        param_y = ((self.s[idx] * self.s[idx]) - (self.t[idx] * self.t[idx]))
        history = ((0.3 * param_y) + (0.7 * self.history[idx]))
        self.history[idx] = history.detach()
        assert (param_y.shape == y.shape), 'Label and param shape do not match.'
        return ((y + history), y)
def _build_import_library_x86():
    """Build a MinGW/gcc import library for the current Python (x86).

    Skips the build when the output already exists; warns and returns when the
    required .lib or dlltool step fails. Windows-only helper.
    """
    (out_exists, out_file) = _check_for_import_lib()
    if out_exists:
        log.debug('Skip building import library: "%s" exists', out_file)
        return
    lib_name = ('python%d%d.lib' % tuple(sys.version_info[:2]))
    lib_file = os.path.join(sys.prefix, 'libs', lib_name)
    if (not os.path.isfile(lib_file)):
        # Inside a virtualenv the .lib lives under the base installation.
        if hasattr(sys, 'base_prefix'):
            base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
        elif hasattr(sys, 'real_prefix'):
            base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
        else:
            base_lib = ''
        if os.path.isfile(base_lib):
            lib_file = base_lib
        else:
            log.warn('Cannot build import library: "%s" not found', lib_file)
            return
    log.info('Building import library (ARCH=x86): "%s"', out_file)
    from numpy.distutils import lib2def
    def_name = ('python%d%d.def' % tuple(sys.version_info[:2]))
    def_file = os.path.join(sys.prefix, 'libs', def_name)
    nm_cmd = ('%s %s' % (lib2def.DEFAULT_NM, lib_file))
    nm_output = lib2def.getnm(nm_cmd)
    (dlist, flist) = lib2def.parse_nm(nm_output)
    # BUG FIX: the .def file handle was opened and never closed; use a
    # context manager so it is flushed before dlltool reads it.
    with open(def_file, 'w') as def_fh:
        lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, def_fh)
    dll_name = find_python_dll()
    args = (dll_name, def_file, out_file)
    cmd = ('dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args)
    status = os.system(cmd)
    if status:
        log.warn('Failed to build import library for gcc. Linking will fail.')
    return
def DistributedDataParallelCPU(*args, **kwargs):
    """Deprecated shim: warn, then delegate to DistributedDataParallel with
    all arguments unchanged."""
    import warnings
    warnings.warn('torch.nn.parallel.DistributedDataParallelCPU is deprecated, please use torch.nn.parallel.DistributedDataParallel instead.')
    return DistributedDataParallel(*args, **kwargs)
class Configs(ConfigsTemplate):
    """Project configuration container: wires up BERT-pretrained file paths
    and derives the processed/model/checkpoint/log file names."""

    def __init__(self, hparams_center, project_name):
        super(Configs, self).__init__(hparams_center, project_name)
        self['dev_list_file'] = os.path.join(self['processed_dir'], 'dev_list_file.txt')
        if ('bert_pretrained_dir' in self):
            self['vocab_file'] = join(self['bert_pretrained_dir'], 'vocab.txt')
            self['init_checkpoint'] = join(self['bert_pretrained_dir'], 'bert_model.ckpt')
            self['bert_config_file'] = join(self['bert_pretrained_dir'], 'bert_config.json')
            # NOTE(review): basename(...) of the stripped path is truthy for
            # any non-empty directory name, so this branch is effectively
            # always taken — presumably an "uncased"-in-name test was
            # intended; confirm.
            if os.path.basename(self['bert_pretrained_dir'].strip().strip('/')):
                self['do_lower_case'] = True
            else:
                self['do_lower_case'] = False
        else:
            # No BERT checkpoint configured: placeholder values.
            self['vocab_file'] = 'none'
            self['init_checkpoint'] = 'none'
            self['bert_config_file'] = 'none'
            self['do_lower_case'] = True
        self.logging_hparams()

    def _file_names(self):
        """Derive all dataset/model/checkpoint/log file names; raises
        AttributeError for unknown datasets."""
        processed_name = (self.get_params_str(['dataset']) + '_proprec.pickle')
        if ((self['network_type'] is None) or (self['network_type'] == 'test')):
            model_name = '_test'
        else:
            # Model name encodes the identifying hyper-parameters.
            model_name_params = ['dataset', 'network_class', 'network_type', 'max_sequence_len']
            if (self['model_class'] is not None):
                model_name_params += self['model_class'].get_identity_param_list()
            else:
                print('fatal error: can not reach the model class')
            model_name = self.get_params_str(model_name_params)
        ckpt_name = 'model_file.ckpt'
        log_name = (('log_' + ConfigsTemplate.time_suffix()) + '.txt')
        raw_data_dir = join(self.project_dir, 'data/BFS')
        if (self['dataset'] == 'e2e_wo_con'):
            (train_data_name, dev_data_name) = ('train_proc_direct_1000_wo_con', 'dev_proc_direct_1000_subset_wo_con')
        else:
            raise AttributeError
        test_data_name = 'test'
        return (processed_name, model_name, ckpt_name, log_name, raw_data_dir, train_data_name, dev_data_name, test_data_name)
def plot3D(bench, output_filename='plot3D.pdf', step=0.1):
    """Render the first fitness component of a 2-D benchmark as a 3-D surface
    over its domain and save the figure to ``output_filename``."""
    def fn_arg0(ind):
        # First component of the first fitness value.
        return bench.fn(ind)[0][0]
    fig = plt.figure(figsize=((4.0 * golden_ratio), 4.0))
    ax = fig.add_subplot(111, projection='3d', azim=(- 19), elev=30, position=[0.25, 0.15, 0.7, 0.7])
    X = np.arange(bench.ind_domain[0], bench.ind_domain[1], step)
    Y = np.arange(bench.ind_domain[0], bench.ind_domain[1], step)
    (X, Y) = np.meshgrid(X, Y)
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement.
    Z = np.fromiter(map(fn_arg0, zip(X.flat, Y.flat)), dtype=float, count=(X.shape[0] * X.shape[1])).reshape(X.shape)
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap('inferno_r'), linewidth=0.2)
    ax.set_xlabel('x0', fontsize=14)
    ax.set_ylabel('x1', fontsize=14)
    ax.set_zlabel('Fitness', fontsize=14)
    # NOTE(review): Tick.label is deprecated in newer matplotlib (use label1
    # or ax.tick_params) — confirm the targeted matplotlib version.
    for t in ax.xaxis.get_major_ticks():
        t.label.set_fontsize(14)
    for t in ax.yaxis.get_major_ticks():
        t.label.set_fontsize(14)
    for t in ax.zaxis.get_major_ticks():
        t.label.set_fontsize(14)
    plt.tight_layout()
    fig.savefig(output_filename)
    plt.close(fig)
class TestTuner(unittest.TestCase):
    """Smoke tests for RandomTuner construction."""

    def test_tuner_runs(self):
        """Constructing a RandomTuner over a mixed grid/range search space
        must not raise."""
        def eval_config(params):
            # Constant objective; the tuner only needs a callable.
            return 0.5
        space = {'param1': [0.0, 1.0, 2.0], 'param2': {'range': (10.0, 20.0)}}
        RandomTuner(space, eval_config, budget=50)
def gen_adv(net, eps):
    """FGSM attack on one training batch: perturb inputs by ``eps * sign(grad)``
    and measure the fraction of originally-correct samples that get fooled.

    Returns (fool_rate, adv_inputs, targets, predicted, adv_predicted).

    Relies on module-level globals: ``trainloader`` (consumed with ``next``),
    ``device``, and ``criterion``.
    NOTE(review): raises ZeroDivisionError when no sample in the batch is
    classified correctly (cnt_correct == 0) — confirm acceptable.
    """
    global trainloader
    net.eval()
    (inputs, targets) = next(trainloader)
    (inputs, targets) = (inputs.to(device), targets.to(device))
    # Gradients w.r.t. the inputs are needed for the FGSM step.
    inputs.requires_grad = True
    outputs = net(inputs)
    (_, predicted) = torch.max(outputs, 1)
    loss = criterion(outputs, targets)
    grad = torch.autograd.grad(loss, [inputs])[0]
    with torch.no_grad():
        # Single-step FGSM perturbation.
        adv_inputs = (inputs + (eps * grad.sign()))
        adv_outputs = net(adv_inputs)
        (_, adv_predicted) = torch.max(adv_outputs, 1)
    cnt_correct = 0  # samples the clean model got right
    cnt_fool = 0     # of those, samples the adversarial input flipped
    for i in range(inputs.size(0)):
        if (predicted[i] == targets[i]):
            cnt_correct += 1
            if (adv_predicted[i] != targets[i]):
                cnt_fool += 1
    return ((float(cnt_fool) / cnt_correct), adv_inputs, targets, predicted, adv_predicted)
def subs_all(f, sub, simplify=False):
    """Apply each substitution mapping in ``sub`` (in order) to each
    expression in ``f`` via ``.subs``; dict-valued entries are substituted
    value-wise.

    A non-sequence ``f`` is treated as a singleton and its single result is
    returned unwrapped. With ``simplify=True``, non-dict results are passed
    through ``.simplify()``; dicts are returned as-is.
    """
    singleton = not isinstance(f, (list, tuple))
    if singleton:
        f = [f]
    if not isinstance(sub, (list, tuple)):
        sub = [sub]
    substituted = []
    for expr in f:
        for mapping in sub:
            if isinstance(expr, dict):
                expr = {key: expr[key].subs(mapping) for key in expr}
            else:
                expr = expr.subs(mapping)
        substituted.append(expr)
    if singleton:
        first = substituted[0]
        if not simplify or isinstance(first, dict):
            return first
        return first.simplify()
    if simplify:
        return [item if isinstance(item, dict) else item.simplify() for item in substituted]
    return substituted
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.