code stringlengths 101 5.91M |
|---|
class CopyToMap(xf.SingleStateTransformation):
    """Converts a direct array-to-array copy edge into an explicit copy map.

    Matches an ``a -> b`` edge between two array access nodes. Only applies
    when the two arrays have *different* strides (same-stride copies are
    already expressible as plain memory copies).
    """
    a = xf.PatternNode(nodes.AccessNode)
    b = xf.PatternNode(nodes.AccessNode)

    @classmethod
    def expressions(cls):
        # Fix: restored the @classmethod decorator lost in extraction — the
        # transformation framework invokes expressions() on the class and
        # the body uses ``cls``.
        return [sdutil.node_path_graph(cls.a, cls.b)]

    def can_be_applied(self, graph: SDFGState, expr_index: int, sdfg: SDFG, permissive: bool = False) -> bool:
        """Accept only array-to-array, non-view-aliasing, differing-stride copies."""
        if not isinstance(self.a.desc(sdfg), data.Array):
            return False
        if not isinstance(self.b.desc(sdfg), data.Array):
            return False
        # Reject view edges: a view pointing at its own viewed node is not a copy.
        if isinstance(self.a.desc(sdfg), data.View):
            if sdutil.get_view_node(graph, self.a) == self.b:
                return False
        if isinstance(self.b.desc(sdfg), data.View):
            if sdutil.get_view_node(graph, self.b) == self.a:
                return False
        # Identical strides -> a contiguous copy already works; nothing to do.
        if self.a.desc(sdfg).strides == self.b.desc(sdfg).strides:
            return False
        return True

    def delinearize_linearize(self, desc: data.Array, copy_shape: Tuple[symbolic.SymbolicType], rng: subsets.Range) -> Tuple[symbolic.SymbolicType]:
        """Build a subset over ``desc`` addressed by map indices ``__i0..__iN``.

        If shapes match, the map indices address ``desc`` directly; otherwise
        the indices are linearized over ``copy_shape`` (row-major) and then
        delinearized into ``desc.shape`` coordinates symbolically.
        """
        indices = [symbolic.pystr_to_symbolic(f'__i{i}') for i in range(len(copy_shape))]
        if tuple(desc.shape) == tuple(copy_shape):
            return subsets.Range([(ind, ind, 1) for ind in indices])
        if rng is not None:
            # Offset by the subset's start coordinates.
            indices = rng.coord_at(indices)
        # Row-major linearization over the copy shape.
        linear_index = sum((indices[i] * data._prod(copy_shape[(i + 1):]) for i in range(len(indices))))
        # Delinearize via symbolic div/mod, innermost dimension first.
        cur_index = [0] * len(desc.shape)
        divide_by = 1
        for i in reversed(range(len(desc.shape))):
            cur_index[i] = (linear_index / divide_by) % desc.shape[i]
            divide_by = divide_by * desc.shape[i]
        return subsets.Range([(ind, ind, 1) for ind in cur_index])

    def apply(self, state: SDFGState, sdfg: SDFG):
        """Replace the copy edge with a mapped identity tasklet."""
        adesc = self.a.desc(sdfg)
        bdesc = self.b.desc(sdfg)
        edge = state.edges_between(self.a, self.b)[0]
        # Iterate over the higher-dimensional side; delinearize the other.
        if len(adesc.shape) >= len(bdesc.shape):
            copy_shape = edge.data.get_src_subset(edge, state).size()
            copy_a = True
        else:
            copy_shape = edge.data.get_dst_subset(edge, state).size()
            copy_a = False
        maprange = {f'__i{i}': (0, (s - 1), 1) for (i, s) in enumerate(copy_shape)}
        av = self.a.data
        bv = self.b.data
        avnode = self.a
        bvnode = self.b
        if copy_a:
            a_index = [symbolic.pystr_to_symbolic(f'__i{i}') for i in range(len(copy_shape))]
            b_index = self.delinearize_linearize(bdesc, copy_shape, edge.data.get_dst_subset(edge, state))
        else:
            a_index = self.delinearize_linearize(adesc, copy_shape, edge.data.get_src_subset(edge, state))
            b_index = [symbolic.pystr_to_symbolic(f'__i{i}') for i in range(len(copy_shape))]
        a_subset = subsets.Range([(ind, ind, 1) for ind in a_index])
        b_subset = subsets.Range([(ind, ind, 1) for ind in b_index])
        # GPU arrays need a GPU map, unless already inside device code.
        schedule = dtypes.ScheduleType.Default
        if (adesc.storage == dtypes.StorageType.GPU_Global) or (bdesc.storage == dtypes.StorageType.GPU_Global):
            if is_devicelevel_gpu(sdfg, state, self.a):
                schedule = dtypes.ScheduleType.Sequential
            else:
                schedule = dtypes.ScheduleType.GPU_Device
        (t, _, _) = state.add_mapped_tasklet('copy', maprange, dict(__inp=Memlet(data=av, subset=a_subset)), '__out = __inp', dict(__out=Memlet(data=bv, subset=b_subset)), schedule, external_edges=True, input_nodes={av: avnode}, output_nodes={bv: bvnode})
        t.in_connectors['__inp'] = adesc.dtype
        t.out_connectors['__out'] = bdesc.dtype
        state.remove_edge(edge)
class ColoredWrapper():
    """ANSI-colored console logger that can also forward to a stdlib logger.

    NOTE(review): ``propagte`` is a typo for "propagate", but it is part of
    the public keyword interface and is kept for backward compatibility.
    """
    SUCCESS = '\x1b[92m'
    STATUS = '\x1b[94m'
    WARNING = '\x1b[93m'
    ERROR = '\x1b[91m'
    BOLD = '\x1b[1m'
    END = '\x1b[0m'

    def __init__(self, prefix, logger, verbose=True, propagte=False):
        self.verbose = verbose
        self.propagte = propagte
        self.prefix = prefix
        self._logging = logger

    def _emit(self, message, color, log_fn):
        """Print *message* in *color*; forward to *log_fn* if propagation is on."""
        self._print(message, color)
        if self.propagte:
            log_fn(message)

    def debug(self, message):
        # Debug output is printed only in verbose mode; propagation to the
        # wrapped logger follows the same rule as the other levels.
        if self.verbose:
            self._print(message, ColoredWrapper.STATUS)
        if self.propagte:
            self._logging.debug(message)

    def info(self, message):
        self._emit(message, ColoredWrapper.SUCCESS, self._logging.info)

    def warning(self, message):
        self._emit(message, ColoredWrapper.WARNING, self._logging.warning)

    def error(self, message):
        self._emit(message, ColoredWrapper.ERROR, self._logging.error)

    def critical(self, message):
        self._emit(message, ColoredWrapper.ERROR, self._logging.critical)

    def _print(self, message, color):
        stamp = datetime.datetime.now().strftime('%H:%M:%S.%f')
        click.echo(f'{color}{ColoredWrapper.BOLD}[{stamp}]{ColoredWrapper.END} {ColoredWrapper.BOLD}{self.prefix}{ColoredWrapper.END} {message}')
def get_iw():
    """Collect 'iw-base' rows, split them per alpha, and score each split.

    NOTE(review): relies on module-level ``data``, ``X`` (a pandas-ply style
    symbolic accessor), ``cv_group`` and ``VirtualValidation`` defined
    elsewhere — semantics assumed from names; confirm against their module.
    """
    _a = data.ply_where((X.method == 'iw-base')).ply_select('*', test_metric=X.MSE)
    _cv = (cv_group + ['method'])
    _result = pd.DataFrame(columns=_a.columns)
    for alpha in _a.alpha.unique():
        _aa = _a.ply_where((X.alpha == alpha))
        # Tag each method name with its alpha so the splits stay distinguishable.
        _aa['method'] = (_aa['method'] + _aa['alpha'].apply((lambda alpha: f'(alpha={alpha})')))
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0; this
        # code assumes an older pandas.
        _result = _result.append(_aa)
    _result = VirtualValidation(_result).fit(_cv, [('loocv_score', {'larger_is_better': False})])
    return _result[(cv_group + ['target_c', 'method', 'test_metric'])]
class UniSpeechPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error on instantiation
    unless the 'torch' backend is installed (transformers dummy-object
    pattern)."""
    # Backends that must be available for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class NBestSeparateOutputHandler(OutputHandler):
    """Writes n-best hypotheses to separate text files, one file per rank."""
    name = 'nbest_sep'

    def __init__(self, path, args):
        super(NBestSeparateOutputHandler, self).__init__()
        # One output path per n-best rank: <path>_<i>.txt
        self.paths = [(((path + '_') + str(i)) + '.txt') for i in range(max(args.nbest, 1))]
        # Fix: initialize the handle list so write_hypos() can test
        # "not self.f" before the first open_file() call; previously the
        # attribute did not exist and the test raised AttributeError.
        self.f = []

    def write_hypos(self, all_hypos, sen_indices=None):
        """Append each sentence's i-th best hypothesis to file i.

        Short n-best lists are padded by repeating the last hypothesis.
        """
        if (not self.f):
            self.open_file()
        for hypos in all_hypos:
            while (len(hypos) < len(self.f)):
                hypos.append(hypos[(- 1)])
            for i in range(len(self.f)):
                self.f[i].write(io_utils.decode(hypos[i].trgt_sentence))
                self.f[i].write('\n')
                self.f[i].flush()

    def open_file(self):
        """Open all per-rank output files for writing (UTF-8)."""
        self.f = []
        for p in self.paths:
            self.f.append(codecs.open(p, 'w', encoding='utf-8'))

    def close_file(self):
        for f in self.f:
            f.close()
def test_bytemasked():
    """ak.is_tuple should see through a ByteMaskedArray to its content.

    NOTE(review): ``tuple`` here is the Python builtin and ``record`` is not
    defined in this chunk — both were presumably awkward-array content
    fixtures defined elsewhere in the original test module; confirm before
    running.
    """
    array = ak.Array(ak.contents.ByteMaskedArray(ak.index.Index8(np.array([0, 1, 0, 1], dtype=np.int64)), tuple, valid_when=True))
    assert ak.is_tuple(array)
    array = ak.Array(ak.contents.ByteMaskedArray(ak.index.Index8(np.array([0, 1, 0, 1], dtype=np.int64)), record, valid_when=True))
    assert (not ak.is_tuple(array))
class AuxiliaryHeadCIFAR(nn.Module):
    """Auxiliary classifier head for CIFAR (assumes an 8x8 input feature map)."""

    def __init__(self, C, num_classes):
        super(AuxiliaryHeadCIFAR, self).__init__()
        # 8x8 -> 2x2 after the 5x5/stride-3 average pool; the final 2x2 conv
        # collapses the spatial dimensions to 1x1.
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
class HardTanhChannel(PiecewiseLinearChannel):
    """Hard-tanh nonlinearity expressed as three linear regions."""

    def __init__(self):
        # Saturated at -1 below z=-1, identity on [-1, +1], saturated at +1 above.
        saturate_neg = dict(zmin=-np.inf, zmax=-1, slope=0, x0=-1)
        identity_mid = dict(zmin=-1, zmax=1, slope=1, x0=0)
        saturate_pos = dict(zmin=1, zmax=np.inf, slope=0, x0=1)
        super().__init__(name='h-tanh', regions=[saturate_pos, identity_mid, saturate_neg])
def iob_iobes(tags):
    """Convert a sequence of IOB tags to the IOBES scheme.

    Singleton B- spans become S-, and the final I- of a span becomes E-.

    Raises:
        Exception: if a tag is neither 'O', 'B-*' nor 'I-*'.
    """
    converted = []
    for idx, tag in enumerate(tags):
        prefix = tag.split('-')[0]
        # A span continues iff the next tag exists and is an I- tag.
        continues = idx + 1 < len(tags) and tags[idx + 1].split('-')[0] == 'I'
        if tag == 'O':
            converted.append(tag)
        elif prefix == 'B':
            converted.append(tag if continues else tag.replace('B-', 'S-'))
        elif prefix == 'I':
            converted.append(tag if continues else tag.replace('I-', 'E-'))
        else:
            raise Exception('invalid IOB format !!')
    return converted
# Fix: the original line was a bare call immediately before a def — almost
# certainly a decorator application whose '@' was lost in extraction.
@_numpy_output(check_dtype=True)
def test_ufunc_arcsinh_c(A: dace.complex64[10]):
    """Elementwise complex arcsinh, checked against NumPy's output/dtype."""
    return np.arcsinh(A)
def ToSentences(paragraph, include_token=True):
    """Split *paragraph* into sentences delimited by SENTENCE_START/SENTENCE_END.

    Args:
        paragraph: string to segment.
        include_token: whether boundary tokens are kept in each sentence.

    Returns:
        A list of sentence strings produced by SnippetGen.
    """
    s_gen = SnippetGen(paragraph, SENTENCE_START, SENTENCE_END, include_token)
    # list(...) materializes the generator directly (clearer and faster
    # than the identity comprehension [s for s in s_gen]).
    return list(s_gen)
def knapsack(seq, binary=True, max=1, value_only=False, solver=None, verbose=0, *, integrality_tolerance=0.001):
    """Solve a knapsack problem with a MILP (SageMath).

    ``seq`` is either a list of bare weights (each item then has value 1) or
    a list of ``(weight, value)`` pairs.  ``max`` is the weight capacity;
    ``binary`` selects 0/1 selection vs. integer multiplicities.

    Returns the optimal value alone when ``value_only`` is true, otherwise
    ``[objective, selected_items]``.

    NOTE(review): the parameter name ``max`` shadows the builtin but is part
    of the public Sage interface, so it is kept.
    """
    reals = (not isinstance(seq[0], tuple))
    if reals:
        # Bare weights: give every item unit value.
        seq = [(x, 1) for x in seq]
    from sage.numerical.mip import MixedIntegerLinearProgram
    from sage.rings.integer_ring import ZZ
    p = MixedIntegerLinearProgram(solver=solver, maximization=True)
    if binary:
        present = p.new_variable(binary=True)
    else:
        present = p.new_variable(integer=True)
    # Maximize total value subject to the weight capacity.
    p.set_objective(p.sum([(present[i] * seq[i][1]) for i in range(len(seq))]))
    p.add_constraint(p.sum([(present[i] * seq[i][0]) for i in range(len(seq))]), max=max)
    if value_only:
        return p.solve(objective_only=True, log=verbose)
    else:
        objective = p.solve(log=verbose)
        present = p.get_values(present, convert=ZZ, tolerance=integrality_tolerance)
        val = []
        # Expand each selected item according to its (integer) multiplicity.
        if reals:
            [val.extend(([seq[i][0]] * present[i])) for i in range(len(seq))]
        else:
            [val.extend(([seq[i]] * present[i])) for i in range(len(seq))]
        return [objective, val]
def test_facets(domain):
    """Check the expected counts of unique edges and faces in the cmesh."""
    cmesh = domain.cmesh
    edges_ok = (cmesh.num[1] == 26)
    tst.report(('unique edges: %s' % edges_ok))
    faces_ok = (cmesh.num[2] == 30)
    tst.report(('unique faces: %s' % faces_ok))
    # Both checks are reported individually before the combined assertion.
    assert edges_ok and faces_ok
def handle_arrow(obj, generate_bitmasks=False, pass_empty_field=False):
    """Convert a pyarrow object into an Awkward layout (Content).

    Handles Array, ChunkedArray, RecordBatch, Table, and iterables of
    RecordBatches, recursing as needed.  ``generate_bitmasks`` forces option
    masks to be materialized; ``pass_empty_field`` treats a single unnamed
    column as the whole array rather than a record.
    """
    if isinstance(obj, pyarrow.lib.Array):
        buffers = obj.buffers()
        (awkwardarrow_type, storage_type) = to_awkwardarrow_storage_types(obj.type)
        out = popbuffers(obj, awkwardarrow_type, storage_type, buffers, generate_bitmasks)
        # popbuffers pops from the buffer list; all buffers must be consumed.
        assert (len(buffers) == 0)
        return out
    elif isinstance(obj, pyarrow.lib.ChunkedArray):
        # Convert non-empty chunks independently, then concatenate.
        layouts = [handle_arrow(x, generate_bitmasks) for x in obj.chunks if (len(x) > 0)]
        if (len(layouts) == 1):
            return layouts[0]
        elif any((is_revertable(arr) for arr in layouts)):
            # Revertability (removable option-type) must be all-or-nothing
            # across chunks for the combined revert lambda to be valid.
            assert all((is_revertable(arr) for arr in layouts))
            return revertable(ak.operations.concatenate(layouts, highlevel=False), (lambda : ak.operations.concatenate([remove_optiontype(x) for x in layouts], highlevel=False)))
        else:
            return ak.operations.concatenate(layouts, highlevel=False)
    elif isinstance(obj, pyarrow.lib.RecordBatch):
        if (pass_empty_field and (list(obj.schema.names) == [''])):
            # A single unnamed column stands for the whole array.
            layout = handle_arrow(obj.column(0), generate_bitmasks)
            if (not obj.schema.field(0).nullable):
                return remove_optiontype(layout)
            else:
                return layout
        else:
            record_is_optiontype = False
            optiontype_fields = []
            record_is_scalar = False
            optiontype_parameters = None
            recordtype_parameters = None
            # Awkward round-trip metadata (stored under b'ak:parameters')
            # restores option/record parameters that Arrow cannot express.
            if ((obj.schema.metadata is not None) and (b'ak:parameters' in obj.schema.metadata)):
                for x in json.loads(obj.schema.metadata[b'ak:parameters']):
                    (key,) = x.keys()
                    (value,) = x.values()
                    if (key == 'optiontype_fields'):
                        optiontype_fields = value
                    elif (key == 'record_is_scalar'):
                        record_is_scalar = value
                    elif (key in ('UnmaskedArray', 'BitMaskedArray', 'ByteMaskedArray', 'IndexedOptionArray')):
                        record_is_optiontype = True
                        optiontype_parameters = value
                    elif (key == 'RecordArray'):
                        recordtype_parameters = value
            record_mask = None
            contents = []
            for i in range(obj.num_columns):
                field = obj.schema.field(i)
                layout = handle_arrow(obj.column(i), generate_bitmasks)
                if record_is_optiontype:
                    # AND together per-column invalid masks to get the
                    # record-level mask (valid_when=False convention).
                    if (record_mask is None):
                        record_mask = layout.mask_as_bool(valid_when=False)
                    else:
                        record_mask &= layout.mask_as_bool(valid_when=False)
                # Strip the per-column option layer when the option-ness
                # belongs to the record, or the field is non-nullable.
                if ((record_is_optiontype and (field.name not in optiontype_fields)) or (not field.nullable)):
                    contents.append(remove_optiontype(layout))
                else:
                    contents.append(layout)
            out = ak.contents.RecordArray(contents, obj.schema.names, length=len(obj), parameters=recordtype_parameters)
            if record_is_scalar:
                return out._getitem_at(0)
            if (record_is_optiontype and (record_mask is None) and generate_bitmasks):
                # No column contributed a mask but one was requested.
                record_mask = numpy.zeros(len(out), dtype=np.bool_)
            if (record_is_optiontype and (record_mask is None)):
                return ak.contents.UnmaskedArray.simplified(out, parameters=optiontype_parameters)
            elif record_is_optiontype:
                return ak.contents.ByteMaskedArray.simplified(ak.index.Index8(record_mask), out, valid_when=False, parameters=optiontype_parameters)
            else:
                return out
    elif isinstance(obj, pyarrow.lib.Table):
        # Normalize chunking first so each batch converts cleanly.
        batches = obj.combine_chunks().to_batches()
        if (len(batches) == 0):
            return form_handle_arrow(obj.schema, pass_empty_field=pass_empty_field).length_zero_array(highlevel=False)
        elif (len(batches) == 1):
            return handle_arrow(batches[0], generate_bitmasks, pass_empty_field)
        else:
            arrays = [handle_arrow(batch, generate_bitmasks, pass_empty_field) for batch in batches if (len(batch) > 0)]
            if any((is_revertable(arr) for arr in arrays)):
                assert all((is_revertable(arr) for arr in arrays))
                return revertable(ak.operations.concatenate(arrays, highlevel=False), (lambda : ak.operations.concatenate([remove_optiontype(x) for x in arrays], highlevel=False)))
            else:
                return ak.operations.concatenate(arrays, highlevel=False)
    elif (isinstance(obj, Iterable) and isinstance(obj, Sized) and (len(obj) > 0) and all((isinstance(x, pyarrow.lib.RecordBatch) for x in obj)) and any(((len(x) > 0) for x in obj))):
        # Sized iterable of RecordBatches with at least one non-empty batch.
        chunks = []
        for batch in obj:
            chunk = handle_arrow(batch, generate_bitmasks, pass_empty_field)
            if (len(chunk) > 0):
                chunks.append(chunk)
        if (len(chunks) == 1):
            return chunks[0]
        else:
            return ak.operations.concatenate(chunks, highlevel=False)
    elif (isinstance(obj, Iterable) and (len(obj) == 0)):
        # Empty iterable: an empty record array with no fields.
        return ak.contents.RecordArray([], [], length=0)
    else:
        raise TypeError(f'unrecognized Arrow type: {type(obj)}')
def print_table(task_names, scores):
    """Render task names and their scores as a single-row pretty table."""
    table = PrettyTable()
    table.field_names = task_names
    table.add_row(scores)
    print(table)
# Fix: the original line was a bare call immediately before a def — almost
# certainly a test decorator whose '@' was lost in extraction.
@_utils.test()
def test_offload_with_cross_block_locals2():
    """Regression test: a local crossing offloaded-block boundaries."""
    ret = ti.field(ti.f32)
    ti.root.place(ret)

    # NOTE(review): `ker` was presumably decorated with @ti.kernel in the
    # original source; that decorator did not survive extraction either —
    # confirm against the upstream test.
    def ker():
        s = 0
        for i in range(10):
            s += i
        ret[None] = s
        s = (ret[None] * 2)
        for i in range(10):
            ti.atomic_add(ret[None], s)

    ker()
    assert (ret[None] == (45 * 21))
def get_loss(prediction, labels, mask):
    """Compute the masked cross-entropy classification loss."""
    criterion = CELoss()
    return criterion(prediction, labels, mask)
# Fix: the original line was a bare call immediately before a def — almost
# certainly the array-function dispatch decorator with its '@' lost.
@_function_dispatch(_nanmedian_dispatcher)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
    """Compute the median along *axis*, ignoring NaNs.

    Mirrors numpy.median's signature; ``keepdims`` defaults to the
    np._NoValue sentinel so that "not passed" can be distinguished.
    """
    a = np.asanyarray(a)
    if (a.size == 0):
        # Empty input: nanmean produces the right warning and NaN result.
        return np.nanmean(a, axis, out=out, keepdims=keepdims)
    (r, k) = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out, overwrite_input=overwrite_input)
    if (keepdims and (keepdims is not np._NoValue)):
        return r.reshape(k)
    else:
        return r
# Fix: the original line was a bare name immediately before a def — almost
# certainly a model-registration decorator whose '@' was lost in extraction.
@_model
def tf_efficientnet_lite2(pretrained=False, **kwargs):
    """EfficientNet-Lite2 with TF-compatible BN epsilon and 'same' padding."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet_lite('tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
    return model
class CIntLike(object):
    """Mixin for C integer-like types: Python conversion and formatting."""
    to_py_function = None
    from_py_function = None
    to_pyunicode_utility = None
    default_format_spec = 'd'

    def can_coerce_to_pyobject(self, env):
        return True

    def can_coerce_from_pyobject(self, env):
        return True

    def create_to_py_utility_code(self, env):
        """Register the C-int -> PyObject conversion utility, if not preset."""
        if (type(self).to_py_function is None):
            self.to_py_function = ('__Pyx_PyInt_From_' + self.specialization_name())
            env.use_utility_code(TempitaUtilityCode.load_cached('CIntToPy', 'TypeConversion.c', context={'TYPE': self.empty_declaration_code(), 'TO_PY_FUNCTION': self.to_py_function}))
        return True

    def create_from_py_utility_code(self, env):
        """Register the PyObject -> C-int conversion utility, if not preset."""
        if (type(self).from_py_function is None):
            self.from_py_function = ('__Pyx_PyInt_As_' + self.specialization_name())
            env.use_utility_code(TempitaUtilityCode.load_cached('CIntFromPy', 'TypeConversion.c', context={'TYPE': self.empty_declaration_code(), 'FROM_PY_FUNCTION': self.from_py_function}))
        return True

    # Fix: restored the @staticmethod decorator lost in extraction.  The
    # method takes no ``self``, and can_coerce_to_pystring() calls it as
    # ``self._parse_format(format_spec)`` — without the decorator that call
    # would bind the instance as ``format_spec`` and raise TypeError.
    @staticmethod
    def _parse_format(format_spec):
        """Parse an integer format spec into (format_type, width, padding).

        Returns (None, 0, padding) for specs this type cannot handle.
        """
        padding = ' '
        if (not format_spec):
            return ('d', 0, padding)
        format_type = format_spec[(- 1)]
        if (format_type in ('o', 'd', 'x', 'X')):
            prefix = format_spec[:(- 1)]
        elif format_type.isdigit():
            # Bare width like '10' implies decimal formatting.
            format_type = 'd'
            prefix = format_spec
        else:
            return (None, 0, padding)
        if (not prefix):
            return (format_type, 0, padding)
        if (prefix[0] == '-'):
            prefix = prefix[1:]
        if (prefix and (prefix[0] == '0')):
            # Leading zero selects zero-padding, e.g. '05d'.
            padding = '0'
            prefix = prefix.lstrip('0')
        if prefix.isdigit():
            return (format_type, int(prefix), padding)
        return (None, 0, padding)

    def can_coerce_to_pystring(self, env, format_spec=None):
        (format_type, width, padding) = self._parse_format(format_spec)
        # Reject unsupported specs and absurd widths.
        return ((format_type is not None) and (width <= (2 ** 30)))

    def convert_to_pystring(self, cvalue, code, format_spec=None):
        """Emit a call formatting *cvalue* as a Python unicode string."""
        if (self.to_pyunicode_utility is None):
            utility_code_name = ('__Pyx_PyUnicode_From_' + self.specialization_name())
            to_pyunicode_utility = TempitaUtilityCode.load_cached('CIntToPyUnicode', 'TypeConversion.c', context={'TYPE': self.empty_declaration_code(), 'TO_PY_FUNCTION': utility_code_name})
            # Cache the (name, utility) pair for reuse on later calls.
            self.to_pyunicode_utility = (utility_code_name, to_pyunicode_utility)
        else:
            (utility_code_name, to_pyunicode_utility) = self.to_pyunicode_utility
        code.globalstate.use_utility_code(to_pyunicode_utility)
        (format_type, width, padding_char) = self._parse_format(format_spec)
        return ("%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type))
def compute_barcode(graph_data, weight_col='intersection_size'):
    """Compute a 0-dimensional persistence barcode over a weighted graph.

    Links are processed in order of decreasing weight (increasing 1/weight);
    each link that merges two connected components produces a finite bar,
    and every surviving component yields an infinite bar (death == -1).
    """
    nodes = graph_data['nodes']
    links = graph_data['links']
    components = []
    barcode = []
    # Start with each node as its own component.
    for node in nodes:
        components.append([node['id']])
    for link in links:
        link['intersection_size']['value'] = int(link['intersection_size']['value'])
        link['jaccard_index']['value'] = float(link['jaccard_index']['value'])
    # Sort by filtration value 1/weight; zero weights sort last (infinite).
    # Fix: the sort key previously divided by the value unconditionally and
    # would raise ZeroDivisionError on zero-weight links.
    links = sorted(links, key=(lambda item: ((1 / item[weight_col]['value']) if item[weight_col]['value'] else np.inf)))
    for link in links:
        source_id = link['source']
        target_id = link['target']
        # Fix: previously compared the whole dict to 0 (always False);
        # the weight value itself must be tested.
        if (link[weight_col]['value'] == 0):
            weight = np.inf
        else:
            weight = (1 / link[weight_col]['value'])
        source_cc_idx = find_cc_index(components, source_id)
        target_cc_idx = find_cc_index(components, target_id)
        if (source_cc_idx != target_cc_idx):
            # The link merges two components: record a finite bar.
            source_cc = components[source_cc_idx]
            target_cc = components[target_cc_idx]
            components = [components[i] for i in range(len(components)) if (i not in [source_cc_idx, target_cc_idx])]
            components.append((source_cc + target_cc))
            link[weight_col]['nodes_subsets'] = {'source_cc': source_cc, 'target_cc': target_cc}
            link[weight_col]['cc_list'] = components.copy()
            barcode.append({'birth': 0, 'death': weight, 'edge': link})
    # One infinite bar per component that never merged away.
    for cc in components:
        barcode.append({'birth': 0, 'death': (- 1), 'edge': 'undefined'})
    return barcode
def _parse_params(params, default_params):
if (params is None):
params = {}
result = copy.deepcopy(default_params)
for (key, value) in params.items():
if (key not in default_params):
print('unknown key', key, value)
continue
if isinstance(value, dict):
default_dict = default_params[key]
if (not isinstance(default_dict, dict)):
raise ValueError('%s should not be a dictionary', key)
if default_dict:
value = _parse_params(value, default_dict)
else:
pass
if (value is None):
continue
if (default_params[key] is None):
result[key] = value
else:
result[key] = type(default_params[key])(value)
return result |
class TestSetState(object):
def setup(self):
self.seed =
self.random_state = random.RandomState(self.seed)
self.state = self.random_state.get_state()
def test_basic(self):
old = self.random_state.tomaxint(16)
self.random_state.set_state(self.state)
new = self.random_state.tomaxint(16)
assert_(np.all((old == new)))
def test_gaussian_reset(self):
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(self.state)
new = self.random_state.standard_normal(size=3)
assert_(np.all((old == new)))
def test_gaussian_reset_in_media_res(self):
self.random_state.standard_normal()
state = self.random_state.get_state()
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(state)
new = self.random_state.standard_normal(size=3)
assert_(np.all((old == new)))
def test_backwards_compatibility(self):
old_state = self.state[:(- 2)]
x1 = self.random_state.standard_normal(size=16)
self.random_state.set_state(old_state)
x2 = self.random_state.standard_normal(size=16)
self.random_state.set_state(self.state)
x3 = self.random_state.standard_normal(size=16)
assert_(np.all((x1 == x2)))
assert_(np.all((x1 == x3)))
def test_negative_binomial(self):
self.random_state.negative_binomial(0.5, 0.5)
def test_get_state_warning(self):
rs = random.RandomState(PCG64())
with suppress_warnings() as sup:
w = sup.record(RuntimeWarning)
state = rs.get_state()
assert_((len(w) == 1))
assert isinstance(state, dict)
assert (state['bit_generator'] == 'PCG64')
def test_invalid_legacy_state_setting(self):
state = self.random_state.get_state()
new_state = (('Unknown',) + state[1:])
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state, np.array(new_state, dtype=np.object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
def test_pickle(self):
self.random_state.seed(0)
self.random_state.random_sample(100)
self.random_state.standard_normal()
pickled = self.random_state.get_state(legacy=False)
assert_equal(pickled['has_gauss'], 1)
rs_unpick = pickle.loads(pickle.dumps(self.random_state))
unpickled = rs_unpick.get_state(legacy=False)
assert_mt19937_state_equal(pickled, unpickled)
def test_state_setting(self):
attr_state = self.random_state.__getstate__()
self.random_state.standard_normal()
self.random_state.__setstate__(attr_state)
state = self.random_state.get_state(legacy=False)
assert_mt19937_state_equal(attr_state, state)
def test_repr(self):
assert repr(self.random_state).startswith('RandomState(MT19937)') |
def preprocess_scenes(scene_name):
    """Collect point data for one scene; terminate the process on failure."""
    try:
        collect_point_data(scene_name)
        print('name: ', scene_name)
    except Exception as exc:
        # Report which scene failed, then exit with a nonzero status so a
        # driving pool/script notices the failure.
        sys.stderr.write(scene_name + 'ERROR!!')
        sys.stderr.write(str(exc))
        sys.exit(-1)
def _gen_harmonic(n, a):
    """Generalized harmonic number H(n, a), broadcast over both arguments.

    NOTE(review): dispatches on ``a > 1`` between two helpers defined
    elsewhere (``_gen_harmonic_gt1`` / ``_gen_harmonic_leq1``); the branch
    semantics are assumed from the names — confirm in the defining module.
    """
    (n, a) = np.broadcast_arrays(n, a)
    return _lazywhere((a > 1), (n, a), f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1)
class Decoder(nn.Module):
    """Upsampling conv decoder: a linear layer to an 8x8 feature map, then
    repeated (conv, conv, nearest-upsample x2) stages up to ``scale_size``.

    NOTE(review): the attribute names (l0..l17, up1..up7) are load-bearing
    for checkpoint state_dicts and must not be renamed.
    """
    def __init__(self, opt, disc=False):
        super(Decoder, self).__init__()
        self.num_channel = opt.nc          # feature channels used throughout
        self.b_size = opt.b_size           # default batch size (fallback in forward)
        self.h = opt.h                     # latent dimension
        self.disc = disc                   # NOTE(review): stored but unused here
        self.t_act = opt.tanh              # NOTE(review): stored but unused here
        self.scale_size = opt.scale_size   # target output resolution
        # Latent -> 8*8*nc, reshaped to (N, nc, 8, 8) in forward().
        self.l0 = nn.Linear(self.h, ((8 * 8) * self.num_channel))
        self.l1 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        self.l2 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
        self.l3 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        self.l4 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        self.up2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.l5 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        self.l6 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        self.up3 = nn.UpsamplingNearest2d(scale_factor=2)
        self.l7 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        self.l8 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        # Extra upsample stages depending on the target resolution
        # (8 * 2^k): 128 -> 1 extra stage, 256 -> 2, 512 -> 3, 1024 -> 4.
        if (self.scale_size == 128):
            self.up4 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l10 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l11 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        elif (self.scale_size == 256):
            self.up4 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l10 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l11 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.up5 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l12 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l13 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        elif (self.scale_size == 512):
            self.up4 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l10 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l11 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.up5 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l12 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l13 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.up6 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l14 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l15 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        elif (self.scale_size == 1024):
            self.up4 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l10 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l11 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.up5 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l12 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l13 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.up6 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l14 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l15 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.up7 = nn.UpsamplingNearest2d(scale_factor=2)
            self.l16 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
            self.l17 = nn.Conv2d(self.num_channel, self.num_channel, 3, 1, 1)
        # Final projection to 3 output (RGB) channels.
        self.l9 = nn.Conv2d(self.num_channel, 3, 3, 1, 1)

    def forward(self, input, batch_size=None):
        # NOTE(review): batch_size is resolved but never used — the
        # view(-1, ...) below infers the batch dimension; kept for interface
        # compatibility.
        if (not batch_size):
            batch_size = self.b_size
        x = self.l0(input)
        x = x.view((- 1), self.num_channel, 8, 8)
        x = F.elu(self.l1(x), True)
        x = F.elu(self.l2(x), True)
        x = self.up1(x)
        x = F.elu(self.l3(x), True)
        x = F.elu(self.l4(x), True)
        x = self.up2(x)
        x = F.elu(self.l5(x), True)
        x = F.elu(self.l6(x), True)
        x = self.up3(x)
        x = F.elu(self.l7(x), True)
        x = F.elu(self.l8(x), True)
        # Resolution-dependent extra stages mirror __init__.
        if (self.scale_size == 128):
            x = self.up4(x)
            x = F.elu(self.l10(x))
            x = F.elu(self.l11(x))
        elif (self.scale_size == 256):
            x = self.up4(x)
            x = F.elu(self.l10(x))
            x = F.elu(self.l11(x))
            x = self.up5(x)
            x = F.elu(self.l12(x))
            x = F.elu(self.l13(x))
        elif (self.scale_size == 512):
            x = self.up4(x)
            x = F.elu(self.l10(x))
            x = F.elu(self.l11(x))
            x = self.up5(x)
            x = F.elu(self.l12(x))
            x = F.elu(self.l13(x))
            x = self.up6(x)
            x = F.elu(self.l14(x))
            x = F.elu(self.l15(x))
        elif (self.scale_size == 1024):
            x = self.up4(x)
            x = F.elu(self.l10(x))
            x = F.elu(self.l11(x))
            x = self.up5(x)
            x = F.elu(self.l12(x))
            x = F.elu(self.l13(x))
            x = self.up6(x)
            x = F.elu(self.l14(x))
            x = F.elu(self.l15(x))
            x = self.up7(x)
            x = F.elu(self.l16(x))
            x = F.elu(self.l17(x))
        x = self.l9(x)
        # NOTE(review): F.tanh is deprecated in modern PyTorch in favor of
        # torch.tanh; kept as-is to avoid touching behavior here.
        x = F.tanh(x)
        return x
class StackFrames(gym.Wrapper):
    """Gym wrapper stacking the last *n_frames* 2D observations along axis 2."""

    def __init__(self, env, n_frames):
        if (not isinstance(env.observation_space, gym.spaces.Box)):
            raise ValueError('Stack frames only works with gym.spaces.Box environment.')
        if (len(env.observation_space.shape) != 2):
            raise ValueError('Stack frames only works with 2D single channel images')
        super().__init__(env)
        self._n_frames = n_frames
        # deque(maxlen=n) automatically evicts the oldest frame on append.
        self._frames = deque(maxlen=n_frames)
        # New observation shape: (H, W, n_frames).
        new_obs_space_shape = (env.observation_space.shape + (n_frames,))
        _low = env.observation_space.low.flatten()[0]
        _high = env.observation_space.high.flatten()[0]
        self._observation_space = gym.spaces.Box(_low, _high, shape=new_obs_space_shape, dtype=env.observation_space.dtype)

    # Fix: restored the @property/@<name>.setter pair.  In the extracted
    # source the getter had lost '@property' and the setter's decorator was
    # a bare '_space.setter' statement (a NameError at class-body execution).
    @property
    def observation_space(self):
        return self._observation_space

    @observation_space.setter
    def observation_space(self, observation_space):
        self._observation_space = observation_space

    def _stack_frames(self):
        # (H, W) frames -> (H, W, n_frames).
        return np.stack(self._frames, axis=2)

    def reset(self):
        """Reset the env and prefill the stack with copies of the first frame."""
        observation = self.env.reset()
        self._frames.clear()
        for i in range(self._n_frames):
            self._frames.append(observation)
        return self._stack_frames()

    def step(self, action):
        (new_observation, reward, done, info) = self.env.step(action)
        self._frames.append(new_observation)
        return (self._stack_frames(), reward, done, info)
# Fix: the original line was a bare call immediately before a def — almost
# certainly a test decorator whose '@' was lost in extraction.
@_utils.test(arch=get_host_arch_list())
def test_static_assert_data_type_ok():
    """ti.static_assert must accept a matching field dtype without raising."""
    x = ti.field(ti.f32, ())

    # NOTE(review): `func` was presumably a @ti.func/@ti.kernel in the
    # original source; confirm against the upstream test.
    def func():
        ti.static_assert((x.dtype == ti.f32))

    func()
class Config(object):
    """Build configuration resolved from ``site_cfg`` with sensible fallbacks.

    NOTE(review): ``has_attr`` (not the builtin ``hasattr``) and ``site_cfg``
    are defined elsewhere in the original module; each accessor falls back to
    a default when the corresponding site_cfg attribute is absent.
    """
    def python_version(self):
        """Return the configured Python version, or the running interpreter's."""
        if has_attr(site_cfg, 'python_version'):
            # '*' means "autodetect from the running interpreter".
            if ('*' in site_cfg.python_version):
                return ('%d.%d' % tuple(sys.version_info[:2]))
            else:
                return site_cfg.python_version
        else:
            return ('%d.%d' % tuple(sys.version_info[:2]))

    def python_include(self):
        """Return the Python C-API include directory."""
        if (has_attr(site_cfg, 'python_include') and (site_cfg.python_include != 'auto')):
            return site_cfg.python_include
        else:
            return sysconfig.get_config_var('INCLUDEPY')

    def system(self):
        """Return 'posix' or 'windows', preferring an explicit site setting."""
        if (has_attr(site_cfg, 'system') and (site_cfg.system is not None)):
            return site_cfg.system
        elif (os.name in ['posix']):
            return 'posix'
        elif (os.name in ['nt']):
            return 'windows'
        else:
            raise ValueError(msg_unknown_os)

    def compile_flags(self) -> list:
        """Return compiler flags plus system-specific extras."""
        if has_attr(site_cfg, 'compile_flags'):
            flags = site_cfg.compile_flags
            if isinstance(flags, str):
                warn('Compile flags should be given as a list of strings. Space-separated strings may be removed in the near future.', DeprecationWarning, stacklevel=2)
                flags = flags.split()
        else:
            flags = ['-g', '-O2']
        return (flags + compose_system_compile_flags((self.system() == 'posix')))

    def debug_flags(self) -> list:
        if has_attr(site_cfg, 'debug_flags'):
            return site_cfg.debug_flags
        else:
            return []

    def numpydoc_path(self):
        """Return an explicit numpydoc path, or verify numpydoc is importable.

        NOTE(review): when the import succeeds this returns None implicitly —
        possibly it should return the module's path; confirm against callers.
        """
        if (has_attr(site_cfg, 'numpydoc_path') and (site_cfg.numpydoc_path is not None)):
            return site_cfg.numpydoc_path
        else:
            try:
                import numpydoc
            except ImportError:
                raise ValueError(msg_numpydoc)

    def is_release(self):
        if has_attr(site_cfg, 'is_release'):
            return site_cfg.is_release
        else:
            return ''

    def tetgen_path(self):
        if has_attr(site_cfg, 'tetgen_path'):
            return site_cfg.tetgen_path
        else:
            return '/usr/bin/tetgen'

    def refmap_memory_factor(self):
        if has_attr(site_cfg, 'refmap_memory_factor'):
            return site_cfg.refmap_memory_factor
        else:
            return None
class Dummy(Dataset):
    """Placeholder dataset: a configurable number of empty samples."""

    def __init__(self, cfgdata):
        # Only the configured length matters; every item is an empty dict.
        self.length = int(cfgdata.length)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return {}
def snapshot(gc_generation=0) -> MallocInstant:
    """Take a tracemalloc snapshot, optionally forcing a GC pass first.

    Pass ``gc_generation=None`` to skip garbage collection; otherwise the
    given generation is collected so dead objects don't pollute the snapshot.
    """
    if (gc_generation is not None):
        gc.collect(gc_generation)
    return MallocInstant(tracemalloc.take_snapshot())
def TTable_GetMapHitsIterator(GraphSeq, Context, MaxIter=20):
    """Thin wrapper delegating to the SWIG-generated _snap binding.

    NOTE(review): argument semantics (HITS over a graph sequence, iteration
    cap MaxIter) are assumed from the names — see the SNAP documentation.
    """
    return _snap.TTable_GetMapHitsIterator(GraphSeq, Context, MaxIter)
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-2: slow (Python) and fast (Rust) variants."""
    tokenizer_class = GPT2Tokenizer
    test_rust_tokenizer = True

    def setUp(self):
        """Write a toy BPE vocab and merges file into the temp dir.

        NOTE(review): the 'G'-prefixed tokens are presumably GPT-2's 'Ġ'
        space-marker tokens with the marker character lost in extraction —
        confirm against the upstream test before relying on exact ids.
        """
        super(GPT2TokenizationTest, self).setUp()
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)

    def test_full_tokenizer(self):
        """BPE-tokenize a phrase and check tokens and their vocab ids."""
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['Glow', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        # An unknown token maps to the <unk> id (19 in the toy vocab).
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        """The Rust tokenizer must agree with the Python one token-for-token."""
        if (not self.test_rust_tokenizer):
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False, add_prefix_space=True)
        sequence = u'lower newer'
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Re-check with prefix-space handled by the fast tokenizer itself.
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        input_tokens = (tokens + [rust_tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def dummy_embedded_data(dummy_data, hparams):
    """Swap padded text ids in *dummy_data* for freshly-embedded vectors.

    Returns the 5-tuple with the text ids replaced by a tensor of shape
    (batch, seq_len, hparams.symbols_embedding_dim); the remaining fields
    pass through unchanged.
    """
    text_padded, input_lengths, mel_padded, gate_padded, output_lengths = dummy_data
    embedding = torch.nn.Embedding(hparams.n_symbols, hparams.symbols_embedding_dim)
    return (embedding(text_padded), input_lengths, mel_padded, gate_padded, output_lengths)
def _hparams(algorithm, dataset, random_seed):
    """Return DomainBed-style hyperparameters for `algorithm` on `dataset`.

    Each entry maps name -> (default_value, random_value). The random value
    is drawn from a RandomState seeded by (random_seed, name), so a given
    (seed, name) pair always yields the same draw regardless of call order.
    """
    SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST']
    hparams = {}
    def _hparam(name, default_val, random_val_fn):
        # Register (default, random draw); duplicate registration is a bug.
        assert (name not in hparams)
        random_state = np.random.RandomState(misc.seed_hash(random_seed, name))
        hparams[name] = (default_val, random_val_fn(random_state))
    # --- Hyperparameters shared by every algorithm ---
    _hparam('data_augmentation', True, (lambda r: True))
    _hparam('arch', 'resnet50', (lambda r: 'resnet50'))
    _hparam('resnet_dropout', 0.0, (lambda r: r.choice([0.0, 0.1, 0.5])))
    _hparam('class_balanced', False, (lambda r: False))
    _hparam('nonlinear_classifier', False, (lambda r: bool(r.choice([False, False]))))
    # --- Algorithm-specific hyperparameters ---
    if (algorithm in ['DANN', 'CDANN']):
        _hparam('lambda', 1.0, (lambda r: (10 ** r.uniform((- 2), 2))))
        _hparam('weight_decay_d', 0.0, (lambda r: (10 ** r.uniform((- 6), (- 2)))))
        _hparam('d_steps_per_g_step', 1, (lambda r: int((2 ** r.uniform(0, 3)))))
        _hparam('grad_penalty', 0.0, (lambda r: (10 ** r.uniform((- 2), 1))))
        _hparam('beta1', 0.5, (lambda r: r.choice([0.0, 0.5])))
        _hparam('mlp_width', 256, (lambda r: int((2 ** r.uniform(6, 10)))))
        _hparam('mlp_depth', 3, (lambda r: int(r.choice([3, 4, 5]))))
        _hparam('mlp_dropout', 0.0, (lambda r: r.choice([0.0, 0.1, 0.5])))
    elif (algorithm == 'Fish'):
        _hparam('meta_lr', 0.5, (lambda r: r.choice([0.05, 0.1, 0.5])))
    elif (algorithm == 'RSC'):
        _hparam('rsc_f_drop_factor', (1 / 3), (lambda r: r.uniform(0, 0.5)))
        _hparam('rsc_b_drop_factor', (1 / 3), (lambda r: r.uniform(0, 0.5)))
    elif (algorithm == 'SagNet'):
        _hparam('sag_w_adv', 0.1, (lambda r: (10 ** r.uniform((- 2), 1))))
    elif (algorithm == 'IRM'):
        _hparam('irm_lambda', 100.0, (lambda r: (10 ** r.uniform((- 1), 5))))
        _hparam('irm_penalty_anneal_iters', 500, (lambda r: int((10 ** r.uniform(0, 4)))))
    elif (algorithm == 'Mixup'):
        _hparam('mixup_alpha', 0.2, (lambda r: (10 ** r.uniform((- 1), (- 1)))))
    elif (algorithm == 'GroupDRO'):
        _hparam('groupdro_eta', 0.01, (lambda r: (10 ** r.uniform((- 3), (- 1)))))
    elif ((algorithm == 'MMD') or (algorithm == 'CORAL')):
        _hparam('mmd_gamma', 1.0, (lambda r: (10 ** r.uniform((- 1), 1))))
    elif (algorithm == 'MLDG'):
        _hparam('mldg_beta', 1.0, (lambda r: (10 ** r.uniform((- 1), 1))))
    elif (algorithm == 'MTL'):
        _hparam('mtl_ema', 0.99, (lambda r: r.choice([0.5, 0.9, 0.99, 1.0])))
    elif (algorithm == 'VREx'):
        _hparam('vrex_lambda', 10.0, (lambda r: (10 ** r.uniform((- 1), 5))))
        _hparam('vrex_penalty_anneal_iters', 500, (lambda r: int((10 ** r.uniform(0, 4)))))
    elif (algorithm == 'SD'):
        _hparam('sd_reg', 0.1, (lambda r: (10 ** r.uniform((- 5), (- 1)))))
    elif (algorithm == 'ANDMask'):
        _hparam('tau', 1, (lambda r: r.uniform(0.5, 1.0)))
    elif (algorithm == 'IGA'):
        _hparam('penalty', 1000, (lambda r: (10 ** r.uniform(1, 5))))
    elif (algorithm == 'SANDMask'):
        _hparam('tau', 1.0, (lambda r: r.uniform(0.0, 1.0)))
        _hparam('k', 10.0, (lambda r: (10 ** r.uniform((- 3), 5))))
    elif (algorithm == 'Fishr'):
        _hparam('lambda', 1000.0, (lambda r: (10 ** r.uniform(1.0, 4.0))))
        _hparam('penalty_anneal_iters', 1500, (lambda r: int(r.uniform(0.0, 5000.0))))
        _hparam('ema', 0.95, (lambda r: r.uniform(0.9, 0.99)))
    elif (algorithm == 'TRM'):
        _hparam('cos_lambda', 0.0001, (lambda r: (10 ** r.uniform((- 5), 0))))
        _hparam('iters', 200, (lambda r: int((10 ** r.uniform(0, 4)))))
        _hparam('groupdro_eta', 0.01, (lambda r: (10 ** r.uniform((- 3), (- 1)))))
    elif (algorithm == 'IB_ERM'):
        _hparam('ib_lambda', 100.0, (lambda r: (10 ** r.uniform((- 1), 5))))
        _hparam('ib_penalty_anneal_iters', 500, (lambda r: int((10 ** r.uniform(0, 4)))))
    elif (algorithm == 'IB_IRM'):
        _hparam('irm_lambda', 100.0, (lambda r: (10 ** r.uniform((- 1), 5))))
        _hparam('irm_penalty_anneal_iters', 500, (lambda r: int((10 ** r.uniform(0, 4)))))
        _hparam('ib_lambda', 100.0, (lambda r: (10 ** r.uniform((- 1), 5))))
        _hparam('ib_penalty_anneal_iters', 500, (lambda r: int((10 ** r.uniform(0, 4)))))
    elif ((algorithm == 'CAD') or (algorithm == 'CondCAD')):
        _hparam('lmbda', 0.1, (lambda r: r.choice([0.0001, 0.001, 0.01, 0.1, 1, 10.0, 100.0])))
        _hparam('temperature', 0.1, (lambda r: r.choice([0.05, 0.1])))
        _hparam('is_normalized', False, (lambda r: False))
        _hparam('is_project', False, (lambda r: False))
        _hparam('is_flipped', True, (lambda r: True))
    # --- Dataset-conditioned optimization hyperparameters ---
    if (dataset in SMALL_IMAGES):
        _hparam('lr', 0.001, (lambda r: (10 ** r.uniform((- 4.5), (- 2.5)))))
    else:
        _hparam('lr', 5e-05, (lambda r: (10 ** r.uniform((- 5), (- 3.5)))))
    if (dataset in SMALL_IMAGES):
        _hparam('weight_decay', 0.0, (lambda r: 0.0))
    else:
        _hparam('weight_decay', 0.0, (lambda r: (10 ** r.uniform((- 6), (- 2)))))
    if (dataset in SMALL_IMAGES):
        _hparam('batch_size', 64, (lambda r: int((2 ** r.uniform(3, 9)))))
    elif (algorithm == 'ARM'):
        _hparam('batch_size', 8, (lambda r: 8))
    elif (dataset == 'DomainNet'):
        _hparam('batch_size', 32, (lambda r: int((2 ** r.uniform(3, 5)))))
    else:
        _hparam('batch_size', 32, (lambda r: int((2 ** r.uniform(3, 5.5)))))
    # Discriminator/generator learning rates only exist for adversarial methods.
    if ((algorithm in ['DANN', 'CDANN']) and (dataset in SMALL_IMAGES)):
        _hparam('lr_g', 0.001, (lambda r: (10 ** r.uniform((- 4.5), (- 2.5)))))
    elif (algorithm in ['DANN', 'CDANN']):
        _hparam('lr_g', 5e-05, (lambda r: (10 ** r.uniform((- 5), (- 3.5)))))
    if ((algorithm in ['DANN', 'CDANN']) and (dataset in SMALL_IMAGES)):
        _hparam('lr_d', 0.001, (lambda r: (10 ** r.uniform((- 4.5), (- 2.5)))))
    elif (algorithm in ['DANN', 'CDANN']):
        _hparam('lr_d', 5e-05, (lambda r: (10 ** r.uniform((- 5), (- 3.5)))))
    if ((algorithm in ['DANN', 'CDANN']) and (dataset in SMALL_IMAGES)):
        _hparam('weight_decay_g', 0.0, (lambda r: 0.0))
    elif (algorithm in ['DANN', 'CDANN']):
        _hparam('weight_decay_g', 0.0, (lambda r: (10 ** r.uniform((- 6), (- 2)))))
    # NOTE(review): the block below unconditionally discards the batch_size /
    # lr / weight_decay values selected above and pins fixed values — this
    # looks like a leftover experiment override; confirm it is intentional.
    del hparams['batch_size']
    del hparams['lr']
    del hparams['weight_decay']
    _hparam('batch_size', 32, (lambda r: 32))
    _hparam('lr', 5e-05, (lambda r: 5e-05))
    _hparam('weight_decay', 0, (lambda r: (10 ** r.uniform((- 6), (- 4)))))
    return hparams
def inception_v3(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, create_aux_logits=True, scope='InceptionV3', global_pool=False):
    """Inception V3 classification network (TF-slim).

    Returns (logits, end_points). If num_classes is falsy, returns the
    pooled features instead of logits. depth_multiplier scales every
    layer's width, floored at min_depth channels.
    """
    if (depth_multiplier <= 0):
        raise ValueError('depth_multiplier is not greater than zero.')
    # Channel-scaling helper: width * multiplier, never below min_depth.
    depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
    with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            (net, end_points) = inception_v3_base(inputs, scope=scope, min_depth=min_depth, depth_multiplier=depth_multiplier)
            # Auxiliary classifier head off Mixed_6e (training-time regularizer).
            if (create_aux_logits and num_classes):
                with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
                    aux_logits = end_points['Mixed_6e']
                    with tf.variable_scope('AuxLogits'):
                        aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID', scope='AvgPool_1a_5x5')
                        aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')
                        # Shrink the kernel when the feature map is smaller than 5x5.
                        kernel_size = _reduced_kernel_size_for_small_input(aux_logits, [5, 5])
                        aux_logits = slim.conv2d(aux_logits, depth(768), kernel_size, weights_initializer=trunc_normal(0.01), padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
                        aux_logits = slim.conv2d(aux_logits, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, weights_initializer=trunc_normal(0.001), scope='Conv2d_2b_1x1')
                        if spatial_squeeze:
                            aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
                        end_points['AuxLogits'] = aux_logits
            with tf.variable_scope('Logits'):
                if global_pool:
                    # Global average pooling handles arbitrary input sizes.
                    net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')
                    end_points['global_pool'] = net
                else:
                    kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
                    net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size))
                    end_points['AvgPool_1a'] = net
                if (not num_classes):
                    # Feature-extraction mode: no classifier head.
                    return (net, end_points)
                net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
                end_points['PreLogits'] = net
                # 1x1 conv over the pooled map acts as the fully-connected classifier.
                logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1')
                if spatial_squeeze:
                    logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
                end_points['Logits'] = logits
                end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return (logits, end_points)
def ref_crelu(x, axis):
    """Reference concatenated-ReLU: [relu(x), relu(-x)] stacked along `axis`."""
    positive_part = np.maximum(x, 0)
    negative_part = np.maximum(-x, 0)
    return np.concatenate([positive_part, negative_part], axis=axis)
class CharWordEmbedder(nn.Module):
    """Builds word vectors from character ids via embedding + self-attention.

    Input: LongTensor of char ids shaped (batch, words, chars).
    Output: tensor shaped (batch, words, output_size) — each word is the
    sum over its attended character representations.
    """

    def __init__(self, num_chars, embedding_size, output_size, num_heads=8, padding_idx=0):
        # num_chars: character vocabulary size; padding_idx marks padded char slots.
        super(CharWordEmbedder, self).__init__()
        self.num_chars = num_chars
        self.char_embedding = nn.Embedding(num_chars, embedding_size, padding_idx=padding_idx)
        self.attn = MultiHeadAttention(embedding_size, output_size, num_heads)
        self.padding_idx = padding_idx

    def forward(self, x):
        (B, Tw, Tc) = x.shape
        # Fold the word dimension into the batch so attention runs per word
        # over its characters: (B, Tw, Tc) -> (B*Tw, Tc).
        x = x.flatten(0, 1)
        x = self.char_embedding(x)
        # NOTE(review): this mask is computed on the embedded floats, not on
        # the raw char ids, so it is elementwise over the embedding dimension
        # and relies on padding embeddings comparing equal to padding_idx —
        # confirm this matches what MultiHeadAttention.set_mask_* expects.
        mask = x.eq(self.padding_idx)
        self.attn.set_mask_k(mask)
        self.attn.set_mask_q(mask)
        x = self.attn(x)
        # Sum attended char representations into one vector per word.
        x = x.sum(1)
        # Unfold back to (B, Tw, output_size).
        x = x.view(B, Tw, x.size((- 1)))
        return x
def main():
    """Build the blocked-MM benchmark binary and fan out FEXIPRO/LEMP runs.

    Parses CLI options, compiles the C++ runner (optionally with ICC/MKL),
    then dispatches one run per (model, K, algorithm) combination across a
    NUMA-aware multiprocessing pool.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output-dir', required=True)
    parser.add_argument('--scaling-value', type=int, help='maximum value for scaling in FEXIPRO')
    parser.add_argument('--sigma', type=float, help='percentage of SIGMA for SVD incremental prune')
    parser.add_argument('--top-K', help='list of comma-separated integers, e.g., 1,5,10,50')
    # BUG FIX: previously no type=int, so a user-supplied value stayed a
    # string while the default was an int.
    parser.add_argument('--sample-size', type=int, help='number of users to sample')
    parser.add_argument('--icc', dest='icc', action='store_true')
    parser.add_argument('--no-icc', dest='icc', action='store_false')
    parser.set_defaults(icc=False)
    parser.add_argument('--mkl', dest='mkl', action='store_true')
    parser.add_argument('--no-mkl', dest='mkl', action='store_false')
    parser.set_defaults(mkl=False)
    parser.add_argument('--fexipro', dest='run_fexipro', action='store_true')
    parser.add_argument('--lemp', dest='run_fexipro', action='store_false')
    parser.set_defaults(run_fexipro=True)
    args = parser.parse_args()
    # Use `is not None` so an explicit 0 / 0.0 is not silently replaced by
    # the default (the old truthiness checks did exactly that).
    scaling_value = args.scaling_value if args.scaling_value is not None else 127
    sigma = args.sigma if args.sigma is not None else 0.8
    sample_size = args.sample_size if args.sample_size is not None else 1000
    TOP_K = [int(val) for val in args.top_K.split(',')] if args.top_K else [1, 5, 10, 50]
    ALGS = ['SIR', 'SI']
    blocked_mm_runner = '../cpp/blocked_mm/blocked_mm'
    BUILD_COMMAND = 'cd ../cpp/blocked_mm && make clean && make -j2'
    if args.icc:
        BUILD_COMMAND += ' ICC=1'
    if args.mkl:
        BUILD_COMMAND += ' MKL=1'
    BUILD_COMMAND += ' && cd -'
    subprocess.call(BUILD_COMMAND, shell=True)
    other_runner = '../fexipro-orig-build/runFEXIPRO' if args.run_fexipro else '../lemp-no-icc-simd/tools/runLemp'
    output_dir = args.output_dir
    if output_dir[-1] != '/':
        output_dir += '/'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    run_args = []
    numa_queue = get_numa_queue()
    for (model_dir, (num_factors, _, num_items, _, _), _) in TO_RUN:
        input_dir = os.path.join(MODEL_DIR_BASE, model_dir)
        base_name = model_dir.replace('/', '-')
        for (K, alg) in product(TOP_K, ALGS):
            run_args.append((numa_queue, K, alg, scaling_value, sigma, num_factors, num_items, sample_size, args.run_fexipro, input_dir, base_name, output_dir, blocked_mm_runner, other_runner))
    pool = multiprocessing.Pool(NUM_NUMA_NODES)
    try:
        pool.map(run, run_args)
    finally:
        # Release worker processes even if a task raises.
        pool.close()
        pool.join()
class ChannelGate(nn.Module):
    """CBAM-style channel attention gate.

    Pools the spatial dims with each configured pooling flavor, pushes the
    pooled vectors through a shared bottleneck MLP, sums the results, and
    rescales the input channels by the sigmoid of that sum.
    """

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=('avg', 'max')):
        # BUG FIX: the default used to be the mutable list ['avg', 'max'],
        # which is shared across all instances; a tuple default is safe and
        # backward-compatible (only iterated below).
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        # Bottleneck MLP shared by every pooling branch.
        self.mlp = nn.Sequential(Flatten(), nn.Linear(gate_channels, gate_channels // reduction_ratio), nn.ReLU(), nn.Linear(gate_channels // reduction_ratio, gate_channels))
        self.pool_types = pool_types

    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(avg_pool)
            elif pool_type == 'max':
                max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(max_pool)
            elif pool_type == 'lp':
                lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(lp_pool)
            elif pool_type == 'lse':
                # Log-sum-exp pooling (smooth approximation of max).
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp(lse_pool)
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        # Broadcast the per-channel gate back over the spatial dims.
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
class Encoder(nn.Module):
    """Convolutional VAE encoder producing (z, mu, logvar).

    Two stride-2 convolutions downsample the input by 4x in each spatial
    dim, a padded stride-1 convolution keeps that size, and two linear
    heads map the flattened features to the latent mean and log-variance.
    """

    def __init__(self, z_dim, c_dim, x_dim, filt_per_layer=64):
        super(Encoder, self).__init__()
        f = filt_per_layer
        self.model = nn.Sequential(
            nn.Conv2d(int(c_dim), f, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(f, f, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ZeroPad2d((1, 2, 1, 2)),
            nn.Conv2d(f, f, 4, stride=1, padding=0),
            nn.ReLU(),
        )
        # After two 2x downsamplings the flattened size is f * x_dim / 16
        # (x_dim is the input H*W).
        flat_dim = int((f * x_dim) / 16)
        self.fc_mu = nn.Linear(flat_dim, z_dim)
        self.fc_logvar = nn.Linear(flat_dim, z_dim)

    def encode(self, x):
        """Return (mu, logvar) for input batch x."""
        features = self.model(x)
        features = features.view(features.shape[0], -1)
        return self.fc_mu(features), self.fc_logvar(features)

    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * sigma with eps ~ N(0, I)."""
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return mu + noise * sigma

    def forward(self, x):
        mu, logvar = self.encode(x)
        return self.reparameterize(mu, logvar), mu, logvar
def type_hint(arg_name, arg_type):
    """Decorator factory recording a type hint for `arg_name` on the function.

    Hints accumulate in the function's `__tweak_type_hint_meta__` dict; the
    function object itself is returned unchanged.
    """
    def wrap(f):
        try:
            meta = f.__tweak_type_hint_meta__
        except AttributeError:
            meta = {}
            f.__tweak_type_hint_meta__ = meta
        meta[arg_name] = arg_type
        return f
    return wrap
# NOTE(review): this bare call discards its result — it looks like a stripped
# `@add_start_docstrings(...)` decorator on the class below; confirm against
# the upstream transformers source.
_start_docstrings('XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer\n    on top of the pooled output) e.g. for GLUE tasks. ', XLM_ROBERTA_START_DOCSTRING)
class XLMRobertaForSequenceClassification(RobertaForSequenceClassification):
    """XLM-R sequence classification/regression model.

    Inherits all behavior from the RoBERTa version; only the config class
    is repointed at the XLM-R configuration.
    """
    config_class = XLMRobertaConfig
def epoch_speedup(*args, idx=(- 1), **kwargs):
    """Return one speedup value (position `idx`, default the last) from epoch_speedup_dict."""
    speedups = epoch_speedup_dict(*args, **kwargs)
    return list(speedups.values())[idx]
def to_markdown_table(res: TimingResultType, header: Tuple[(str, ...)]=None) -> str:
    """Render nested timing results as a markdown table.

    `res` maps model -> task -> tuple of stats; each stats tuple is appended
    after the (model, task) columns. `header` defaults to
    ('model', 'task', 'mean', 'var').
    """
    if header is None:
        header = ('model', 'task', 'mean', 'var')
    rows = [header, tuple(['--'] * len(header))]
    for model, tasks in res.items():
        for task, line in tasks.items():
            rows.append((model, task) + line)
    return ''.join('| {} |\n'.format(' | '.join(str(cell) for cell in row)) for row in rows)
def reverse_sequence(tensor: Tensor, *, axis: Dim) -> Tensor:
    """Reverse `tensor` along `axis`, respecting per-sequence lengths.

    Index i maps to (length - i - 1); `clip_to_valid` keeps gathered indices
    inside each sequence's valid range.
    """
    lengths = axis.get_size_tensor()
    positions = rf.range_over_dim(axis)
    flipped = rf.combine_bc(lengths, '-', positions) - 1
    return rf.gather(tensor, indices=flipped, axis=axis, clip_to_valid=True)
class DeqSwishQuantTest(serial.SerializedTestCase):
    """Compares a fused int8 Swish operator against a dequantize -> fp16
    swish -> requantize reference pipeline through the ONNXIFI/Glow backend."""

    def _get_scale_zp(self, tensor):
        # Derive an int8 (scale, zero_point) covering [min(tensor, 0), max(tensor)].
        tensor_max = np.max(tensor)
        tensor_min = min(0, np.min(tensor))
        # Scale is rounded through fp16 to mimic the accelerator's precision.
        scale = np.float32(np.float16(((tensor_max - tensor_min) / 255.0)))
        zero_point = ((- tensor_min) / scale)
        zero_point = int(round(np.clip(zero_point, 0, 255.0)))
        return (scale, zero_point)

    def _sigmoid(self, x):
        # Plain logistic in float32.
        return (1.0 / (1.0 + np.exp(np.float32((- x)))))

    def _swish(self, x):
        # swish(x) = x * sigmoid(x)
        return (np.float32(x) * self._sigmoid(x))

    def test_swish_int8(self):
        """Run both pipelines on a linspace input and require bit-identical
        int8 outputs (scale, zero point, and data)."""
        np.random.seed(0)
        workspace.ResetWorkspace()
        n = 256
        X_fp32 = np.linspace((- 20.5), 8.0, num=n).astype(np.float32).reshape(1, n)
        Y_fp32 = self._swish(X_fp32)
        (X_scale, X_zero_point) = self._get_scale_zp(X_fp32)
        (Y_scale, Y_zero_point) = self._get_scale_zp(Y_fp32)
        # Identity FC so the quantized FC stage passes values through unchanged.
        W_fp32 = np.identity(n, dtype=np.float32)
        b_fp32 = np.zeros((n,), dtype=np.float32)
        workspace.FeedBlob('X', X_fp32)
        workspace.FeedBlob('W', W_fp32)
        workspace.FeedBlob('b', b_fp32)
        workspace.RunOperatorOnce(core.CreateOperator('Int8FCPackWeight', ['W'], ['W_int8'], engine='DNNLOWP', save_unpacked_weights=True, in_scale=X_scale))
        # Reference net: quantize -> int8 FC -> fused int8 swish.
        ref_net1 = core.Net('net')
        ref_net1.Int8QuantizeNNPI(['X'], ['X_int8'], Y_scale=X_scale, Y_zero_point=X_zero_point)
        ref_net1.Int8FCFakeAcc32NNPI(['X_int8', 'W_int8', 'b'], ['U_int8'], Y_scale=X_scale, Y_zero_point=X_zero_point)
        ref_net1.SwishFakeInt8NNPI(['U_int8'], ['Y'], X_scale=X_scale, X_zero_point=X_zero_point, Y_scale=Y_scale, Y_zero_point=Y_zero_point)
        ref_net1.Proto().external_output.append('Y')
        # Net under test: dequantize, swish in fp16, requantize.
        ref_net = core.Net('net')
        ref_net.Int8QuantizeNNPI(['X'], ['X_int8'], Y_scale=X_scale, Y_zero_point=X_zero_point)
        ref_net.Int8FCFakeAcc32NNPI(['X_int8', 'W_int8', 'b'], ['U_int8'], Y_scale=X_scale, Y_zero_point=X_zero_point)
        ref_net.Int8DequantizeNNPI(['U_int8'], ['U_fp16'], UsingOneOverScale=False)
        ref_net.SwishFakeFp16NNPI(['U_fp16'], ['Y_fp16'])
        ref_net.Int8QuantizeNNPI(['Y_fp16'], ['Y'], Y_scale=Y_scale, Y_zero_point=Y_zero_point)
        ref_net.Proto().external_output.append('Y')
        # Baseline result from the fused reference net.
        workspace.RunNetOnce(ref_net1)
        Y_fbgemm = workspace.FetchInt8Blob('Y')
        # Rewrite op types to the generic names expected by the ONNXIFI lowering.
        ref_net.Proto().op[0].type = 'Int8Quantize'
        ref_net.Proto().op[1].type = 'Int8FC'
        ref_net.Proto().op[2].type = 'Int8Dequantize'
        ref_net.Proto().op[3].type = 'Swish'
        ref_net.Proto().op[4].type = 'Int8Quantize'
        net_onnxified = onnxifi_caffe2_net(ref_net.Proto(), {}, debug=True, adjust_batch=False, use_onnx=False, weight_names=['W_int8', 'b'])
        # The whole net must lower into exactly one Onnxifi op.
        num_onnxified_ops = sum(((1 if (o.type == 'Onnxifi') else 0) for o in net_onnxified.op))
        np.testing.assert_equal(num_onnxified_ops, 1)
        workspace.CreateNet(net_onnxified)
        workspace.RunNet(net_onnxified.name)
        Y_glow = workspace.FetchInt8Blob('Y')
        U_int8 = workspace.FetchInt8Blob('U_int8')
        diff_Y = np.abs((Y_glow.data - Y_fbgemm.data))
        num_mismatches = np.count_nonzero(diff_Y)
        max_diff = np.max(diff_Y)
        # Any data, scale, or zero-point mismatch dumps debug info and fails.
        if ((max_diff > 0) or (Y_glow.scale != Y_fbgemm.scale) or (Y_glow.zero_point != Y_fbgemm.zero_point)):
            print_test_debug_info('QuantizedSwish', {'X': X_fp32, 'X_scale': X_scale, 'X_zero_point': X_zero_point, 'Y_scale': Y_scale, 'Y_zero_point': Y_zero_point, 'U_int8': U_int8, 'Y_fbgemm': Y_fbgemm, 'Y_glow': Y_glow, 'diff': diff_Y, 'max_diff': max_diff, 'num_mismatches': num_mismatches})
            assert 0
class PredictionVolume(VolumeMetric):
    """Volume metric evaluated on the prediction mask."""

    def __init__(self, metric: str='PREDVOL'):
        # Register under the PREDVOL metric name by default.
        super().__init__(metric)

    def calculate(self):
        """Return the volume of the stored prediction."""
        return self._calculate_volume(self.prediction)
class Arrangements_msetk(Arrangements, Permutations_msetk):
    """Arrangements of a multiset taken k at a time."""

    def _repr_(self):
        """Return the string representation shown by Sage."""
        template = 'Arrangements of the multi-set %s of length %s'
        return template % (list(self.mset), self._k)
def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, minimizer_kwargs=None, take_step=None, accept_test=None, callback=None, interval=50, disp=False, niter_success=None, seed=None):
    """Find the global minimum of `func` via the basin-hopping algorithm.

    Each cycle perturbs the current point (`take_step` or a random
    displacement with adaptive step size), locally minimizes, and
    accepts/rejects via the user tests plus a Metropolis criterion at
    temperature `T`. Stops after `niter` cycles, after `niter_success`
    cycles without a new global minimum, or when `callback` returns True.
    Returns an OptimizeResult whose x/fun come from the lowest minimum seen.
    """
    x0 = np.array(x0)
    rng = check_random_state(seed)
    if minimizer_kwargs is None:
        minimizer_kwargs = dict()
    wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func, **minimizer_kwargs)
    if take_step is not None:
        if not callable(take_step):
            raise TypeError('take_step must be callable')
        # Only wrap with adaptive sizing when the step taker exposes `stepsize`.
        if hasattr(take_step, 'stepsize'):
            take_step_wrapped = AdaptiveStepsize(take_step, interval=interval, verbose=disp)
        else:
            take_step_wrapped = take_step
    else:
        displace = RandomDisplacement(stepsize=stepsize, random_state=rng)
        take_step_wrapped = AdaptiveStepsize(displace, interval=interval, verbose=disp)
    accept_tests = []
    if accept_test is not None:
        if not callable(accept_test):
            raise TypeError('accept_test must be callable')
        accept_tests = [accept_test]
    # The Metropolis criterion always runs after any user-supplied tests.
    metropolis = Metropolis(T, random_state=rng)
    accept_tests.append(metropolis)
    if niter_success is None:
        niter_success = niter + 2
    bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped, accept_tests, disp=disp)
    count, i = 0, 0
    message = ['requested number of basinhopping iterations completed successfully']
    for i in range(niter):
        new_global_min = bh.one_cycle()
        if callable(callback):
            val = callback(bh.xtrial, bh.energy_trial, bh.accept)
            if val is not None:
                if val:
                    # BUG FIX: message previously read 'early byreturning'
                    # (missing space).
                    message = ['callback function requested stop early by returning True']
                    break
        count += 1
        if new_global_min:
            count = 0
        elif count > niter_success:
            message = ['success condition satisfied']
            break
    res = bh.res
    res.lowest_optimization_result = bh.storage.get_lowest()
    res.x = np.copy(res.lowest_optimization_result.x)
    res.fun = res.lowest_optimization_result.fun
    res.message = message
    # i is the last completed loop index, so nit = i + 1 iterations ran.
    res.nit = i + 1
    return res
class BatchNorm1d(_BatchNorm):
    """Batch normalization over 2D (N, C) or 3D (N, C, L) inputs."""

    def _check_input_dim(self, input):
        # Anything other than a 2D or 3D tensor is a usage error.
        ndim = input.dim()
        if ndim not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(ndim))
def generate_loss(level='light', env='basic_mac_6h_vs_8z'):
    """Build a packet-loss tensor of shape (episode_length, dimension) for a SMAC env.

    `level` selects the loss pattern: 'none' (all-ones, i.e. no loss),
    'light', 'medium', or anything else (treated as heavy, matching the
    original elif chain). The per-env message dimension is
    8 * n_agents * n_agents; episodes are 400 steps except corridor (500)
    and, for 'none' only, 2c_vs_64zg (800).

    NOTE: an unrecognized env now raises KeyError (previously the function
    crashed with NameError on the unbound `loss`).
    """
    # Message dimension per environment: 8 * n_agents * n_agents.
    dims = {
        'basic_mac_6h_vs_8z': 8 * 6 * 6,
        'basic_mac_3s_vs_4z': 8 * 3 * 3,
        'basic_mac_3s_vs_5z': 8 * 3 * 3,
        'basic_mac_2c_vs_64zg': 8 * 2 * 2,
        'basic_mac_corridor': 8 * 6 * 6,
        'basic_mac_3s5z': 8 * 8 * 8,
    }
    episode_length = 500 if env == 'basic_mac_corridor' else 400
    if level == 'none':
        # The original used 800 steps for 2c_vs_64zg at this level only.
        if env == 'basic_mac_2c_vs_64zg':
            episode_length = 800
        loss = th.ones((episode_length, dims[env])).cuda()
    elif level == 'light':
        loss = generate_loss_pattern_light(episode_length=episode_length, dimension=dims[env], p_matrix=prob_transition_matrix_light)
    elif level == 'medium':
        loss = generate_loss_pattern_medium(episode_length=episode_length, dimension=dims[env], p_matrix=prob_transition_matrix_medium)
    else:
        # Any other level value falls through to the heavy pattern, exactly
        # as the original elif chain behaved.
        loss = generate_loss_pattern_heavy(episode_length=episode_length, dimension=dims[env], p_matrix=prob_transition_matrix_heavy)
    return loss
def truncated_normal_logZ(r0, v0, zmin, zmax):
    """Log partition function of a Gaussian (mean r0, variance v0) truncated to [zmin, zmax]."""
    gauss_norm = 0.5 * np.log((2 * np.pi) * v0)
    quadratic = (0.5 * (r0 ** 2)) / v0
    g0 = truncated_normal_log_proba(r0, v0, zmin, zmax)
    return gauss_norm + quadratic + g0
def main() -> None:
    """Train online Double DQN on an Atari environment with d3rlpy."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--gpu', action='store_true')
    args = parser.parse_args()
    env = d3rlpy.envs.Atari(gym.make(args.env), num_stack=4)
    eval_env = d3rlpy.envs.Atari(gym.make(args.env), num_stack=4, is_eval=True)
    # Seed library, training env, and eval env for reproducibility.
    d3rlpy.seed(args.seed)
    d3rlpy.envs.seed_env(env, args.seed)
    d3rlpy.envs.seed_env(eval_env, args.seed)
    dqn = d3rlpy.algos.DoubleDQNConfig(batch_size=32, learning_rate=0.00025, optim_factory=d3rlpy.models.optimizers.RMSpropFactory(), target_update_interval=10000, observation_scaler=d3rlpy.preprocessing.PixelObservationScaler()).create(device=args.gpu)
    buffer = d3rlpy.dataset.create_fifo_replay_buffer(limit=1000000, transition_picker=d3rlpy.dataset.FrameStackTransitionPicker(n_frames=4), writer_preprocessor=d3rlpy.dataset.LastFrameWriterPreprocess(), env=env)
    explorer = d3rlpy.algos.LinearDecayEpsilonGreedy(start_epsilon=1.0, end_epsilon=0.1, duration=1000000)
    # BUG FIX: the original read `n_steps=,` (a syntax error — the value was
    # missing). 1,000,000 steps matches the buffer limit and the exploration
    # schedule used here; confirm against the experiment being reproduced.
    dqn.fit_online(env, buffer, explorer, eval_env=eval_env, eval_epsilon=0.01, n_steps=1000000, n_steps_per_epoch=100000, update_interval=4, update_start_step=50000)
def test_map_map_indirect():
    """Nested maps where the inner map's bound is read indirectly from ind[i].

    Compares the compiled DaCe execution against the pure-Python fallback
    (`.f`) on the same random data.
    """
    # BUG FIX: the inner function is invoked via `.f` below, which is the
    # attribute DaCe attaches to decorated programs — without @dace.program
    # that access raises AttributeError.
    @dace.program
    def loop_with_value(A: dace.float64[(20, 20)], ind: dace.int64[20]):
        for i in dace.map[0:20]:
            for j in dace.map[0:ind[i]]:
                A[(i, j)] = j
    A = np.random.rand(20, 20)
    ind = np.random.randint(low=0, high=19, size=(20,), dtype=np.int64)
    expected = A.copy()
    # Compiled execution mutates A; the Python fallback mutates `expected`.
    loop_with_value(A, ind)
    loop_with_value.f(expected, ind)
    assert np.allclose(A, expected)
def get_method_code(source_code, start_line, end_line):
    """Return the lines [start_line, end_line] (1-based, inclusive) of source_code.

    `start_line`/`end_line` may be ints or numeric strings. Returns None when
    `source_code` is None or when extraction fails (the failure is logged).
    """
    try:
        if source_code is not None:
            lines = source_code.split('\n')
            return '\n'.join(lines[int(start_line) - 1:int(end_line)])
        return None
    except Exception as e:
        cf.logger.warning(f'Problem while extracting method code from the changed file contents: {e}')
        # Explicit about the failure result (the original fell off the end
        # after a redundant `pass`).
        return None
class BasicTokenizer(object):
    """Basic BERT-style tokenization: cleaning, CJK isolation, optional
    lower-casing with accent stripping, and punctuation splitting."""

    def __init__(self, do_lower_case=True, vocab=tuple()):
        # do_lower_case: lower-case tokens (preserved vocab tokens excepted).
        # vocab: tokens that must pass through verbatim.
        self.do_lower_case = do_lower_case
        self.vocab = vocab

    def tokenize(self, text):
        """Split `text` into basic word/punctuation tokens."""
        prepared = self._tokenize_chinese_chars(self._clean_text(convert_to_unicode(text)))
        split_tokens = []
        for token in whitespace_tokenize(prepared):
            if preserve_token(token, self.vocab):
                # Vocabulary tokens bypass casing and punctuation splitting.
                split_tokens.append(token)
            else:
                if self.do_lower_case:
                    token = self._run_strip_accents(token.lower())
                split_tokens.extend(self._run_split_on_punc(token))
        return whitespace_tokenize(' '.join(split_tokens))

    def _run_strip_accents(self, text):
        """Remove combining marks (accents) via NFD decomposition."""
        decomposed = unicodedata.normalize('NFD', text)
        return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')

    def _run_split_on_punc(self, text):
        """Split on punctuation, each punctuation char becoming its own token."""
        pieces = []
        at_word_start = True
        for ch in text:
            if _is_punctuation(ch):
                pieces.append([ch])
                at_word_start = True
            else:
                if at_word_start:
                    pieces.append([])
                pieces[-1].append(ch)
                at_word_start = False
        return [''.join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Surround every CJK character with spaces so it tokenizes alone."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((' ', ch, ' '))
            else:
                out.append(ch)
        return ''.join(out)

    def _is_chinese_char(self, cp):
        """True if codepoint `cp` lies in a CJK ideograph block."""
        cjk_blocks = (
            (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F), (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for (lo, hi) in cjk_blocks)

    def _clean_text(self, text):
        """Drop NUL/replacement/control chars; map whitespace to a space."""
        out = []
        for ch in text:
            cp = ord(ch)
            if cp == 0 or cp == 0xFFFD or _is_control(ch):
                continue
            out.append(' ' if _is_whitespace(ch) else ch)
        return ''.join(out)
def thread_pool_executor(gen_func: Callable, batch_inputs: List[Any], unordered: bool=True, sequential_generation: bool=False, show_progress: bool=True, num_threads: int=10, request_timeout: int=60, enable_timer: bool=True) -> List[Any]:
    """Apply `gen_func` over `batch_inputs`, threaded by default.

    Each threaded call is guarded by `request_timeout` seconds and retried
    indefinitely on timeout (matching the original behavior); set
    `sequential_generation` to run plain `map` instead. Returns the results
    as a list (unordered when `unordered` is True).
    """
    def worker_thread(inputs):
        # Retry forever: each attempt gets a fresh single-worker executor so
        # a stuck call can be abandoned without blocking the next attempt.
        while True:
            executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
            future = executor.submit(gen_func, inputs)
            try:
                result = future.result(timeout=request_timeout)
            except concurrent.futures.TimeoutError:
                # NOTE(review): wait=False abandons the stuck thread rather
                # than killing it; repeated timeouts leak threads — confirm
                # this best-effort retry is intentional.
                executor.shutdown(wait=False)
            else:
                executor.shutdown(wait=False)
                return result
    start = time.time()
    with ThreadPool(num_threads) as pool:
        if sequential_generation:
            print(f'Running in sequential mode!')
            result_iter = map(gen_func, batch_inputs)
        else:
            print(f'Running in threaded mode with {num_threads} threads!')
            # Renamed from `iter` — the original shadowed the builtin.
            if unordered:
                result_iter = pool.imap_unordered(worker_thread, batch_inputs)
            else:
                result_iter = pool.imap(worker_thread, batch_inputs)
        results = list(tqdm.tqdm(result_iter, total=len(batch_inputs), disable=(not show_progress)))
    if enable_timer:
        print(f'Run {len(batch_inputs)} calls took {(time.time() - start):.2f}s')
    return list(results)
class WideResNet(nn.Module):
    """Wide ResNet (depth / widen_factor) with a selectable activation.

    Three residual groups of `(depth - 4) // 6` blocks each, widened by
    `widen_factor`; optional feature/weight L2-normalization before the
    final linear classifier (`normalize=True` also drops the fc bias).
    """

    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0, normalize=False, activation='ReLU', softplus_beta=1):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6) == 0
        # BUG FIX: blocks-per-group count. The original used `/`, which is
        # float true-division under Python 3, and then passed the float to
        # NetworkBlock as a layer count; `//` keeps it an int.
        n = (depth - 4) // 6
        block = BasicBlock
        self.normalize = normalize
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
        elif activation == 'GELU':
            self.relu = nn.GELU()
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
        print('Use activation of ' + activation)
        if self.normalize:
            self.fc = nn.Linear(nChannels[3], num_classes, bias=False)
        else:
            self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # Kaiming-style init for convs, unit scale for BN, zero bias for fc.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = (m.kernel_size[0] * m.kernel_size[1]) * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) and not self.normalize:
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        if self.normalize:
            # Cosine classifier: L2-normalize features and fc rows.
            out = F.normalize(out, p=2, dim=1)
            for (_, module) in self.fc.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        out = self.fc(out)
        return out
class ISTFT(nn.Module):
    """Inverse STFT over multi-channel batched spectrograms.

    Input of shape (batch, channel, ...) is folded to (batch*channel, ...)
    before the inverse transform and unfolded back afterwards.
    """

    def __init__(self, complex=True, log_amp=False, length=16384):
        super().__init__()
        # NOTE: amp2db, complex, and log_amp are stored but not consulted
        # by forward() here.
        self.amp2db = audio_nn.DbToAmplitude()
        self.complex = complex
        self.log_amp = log_amp
        self.length = length

    def forward(self, Y_hat):
        batch, channels = Y_hat.shape[0], Y_hat.shape[1]
        # Merge batch and channel so istft sees one leading dimension.
        merged = Y_hat.view(batch * channels, Y_hat.shape[2], Y_hat.shape[3], Y_hat.shape[4])
        waveform = istft(merged, hop_length=HOP_LENGTH, win_length=N_FFT, length=self.length)
        return waveform.view(batch, channels, -1)
def scale_and_shift(x, gamma_init=1.0, beta_init=0.0):
    """Apply a learned scalar affine transform: gamma * x + beta (TF1/slim).

    Both parameters are scalar variables created under the
    'scale_and_shift' variable scope; gamma carries a zero-strength L2
    regularizer so it appears in the regularization collection.
    """
    # Removed: an unused `num_channels = x.shape[-1].value` local.
    with tf.variable_scope('scale_and_shift'):
        # NOTE(review): the variable names ('alpha', 'gamma') do not match
        # the Python names (gamma, beta). Kept as-is — renaming would break
        # existing checkpoints — but confirm this is intentional.
        gamma = tf.get_variable('alpha', (), initializer=tf.constant_initializer(gamma_init), regularizer=slim.l2_regularizer(0.0), dtype=tf.float32)
        beta = tf.get_variable('gamma', (), initializer=tf.constant_initializer(beta_init), dtype=tf.float32)
        return (gamma * x) + beta
class Conv2dWS(nn.Conv2d):
    """Conv2d with Weight Standardization.

    Before each convolution the kernel is standardized per output channel:
    the mean over (in_channels, kH, kW) is subtracted and the result is
    divided by the per-channel std plus 1e-05 for numerical safety.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(Conv2dWS, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)

    def forward(self, x):
        w = self.weight
        # Per-output-channel mean over input channels and both spatial dims.
        mean = w.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        centered = w - mean
        # Unbiased std over the flattened remainder of each output channel.
        std = centered.view(centered.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-05
        standardized = centered / std.expand_as(centered)
        return F.conv2d(x, standardized, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Function_arctan2(GinacFunction):
    """Two-argument arctangent as a symbolic Ginac function.

    Registered with LaTeX name ``\\arctan`` and mapped to ``atan2`` in
    Maxima, SymPy, and Giac.
    """

    def __init__(self):
        conversions = dict(maxima='atan2', sympy='atan2', giac='atan2')
        GinacFunction.__init__(self, 'arctan2', nargs=2, latex_name='\\arctan', conversions=conversions)
def get_lr(policy, base_lr, warmup_start_lr, global_step, num_optimizer_steps, num_warmup_steps):
    """Compute the learning rate for the given step.

    Thin pass-through to :func:`lr_policy.get_lr`, kept so callers can
    import ``get_lr`` from this module directly.
    """
    lr = lr_policy.get_lr(policy, base_lr, warmup_start_lr, global_step, num_optimizer_steps, num_warmup_steps)
    return lr
class FairseqMultiModel(BaseFairseqModel):
    """Base class for combining multiple encoder-decoder models.

    Holds one :class:`FairseqEncoderDecoderModel` per key (e.g. language
    pair) in an ``nn.ModuleDict``; encoders and decoders must share the
    same key set.
    """

    def __init__(self, encoders, decoders):
        super().__init__()
        assert (encoders.keys() == decoders.keys())
        self.keys = list(encoders.keys())
        for key in self.keys:
            check_type(encoders[key], FairseqEncoder)
            check_type(decoders[key], FairseqDecoder)
        self.models = nn.ModuleDict({key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys})

    # Bug fix: this helper has no ``self`` parameter, so as a plain method it
    # was uncallable on instances; mark it @staticmethod (matches upstream).
    @staticmethod
    def build_shared_embeddings(dicts: Dict[(str, Dictionary)], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str]=None):
        """Build a shared embedding table for ``langs``.

        All languages must map to the same (joined) dictionary; raises
        ``ValueError`` otherwise.
        """
        shared_dict = dicts[langs[0]]
        if any(((dicts[lang] != shared_dict) for lang in langs)):
            raise ValueError('--share-*-embeddings requires a joined dictionary: --share-encoder-embeddings requires a joined source dictionary, --share-decoder-embeddings requires a joined target dictionary, and --share-all-embeddings requires a joint source + target dictionary.')
        return build_embedding(shared_dict, embed_dim, pretrained_embed_path)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        raise NotImplementedError

    def max_positions(self):
        """Return {key: (encoder max positions, decoder max positions)}."""
        return {key: (self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions()) for key in self.keys}

    def max_decoder_positions(self):
        """Maximum output length supported by all decoders."""
        return min((model.decoder.max_positions() for model in self.models.values()))

    # Bug fix: encoder/decoder must be properties — forward_decoder below
    # does ``self.decoder(prev_output_tokens, **kwargs)``, which would raise
    # TypeError if ``decoder`` were a zero-argument method instead of an
    # attribute-style accessor returning the underlying module.
    @property
    def encoder(self):
        return self.models[self.keys[0]].encoder

    @property
    def decoder(self):
        return self.models[self.keys[0]].decoder

    def forward_decoder(self, prev_output_tokens, **kwargs):
        return self.decoder(prev_output_tokens, **kwargs)

    def load_state_dict(self, state_dict, strict=True, model_cfg=None, args: Optional[Namespace]=None):
        """Copy parameters, pruning the state dict per ``model_cfg`` first."""
        if ((model_cfg is None) and (args is not None)):
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning("using 'args' is deprecated, please update your code to use dataclass config")
            model_cfg = convert_namespace_to_omegaconf(args).model
        self.upgrade_state_dict(state_dict)
        from fairseq.checkpoint_utils import prune_state_dict
        new_state_dict = prune_state_dict(state_dict, model_cfg)
        return super().load_state_dict(new_state_dict, strict)
def wer(r, h):
    """Word Error Rate (percent) between reference ``r`` and hypothesis ``h``.

    Computes the token-level Levenshtein distance by dynamic programming and
    returns ``100 * distance / len(r)``.

    Args:
        r: reference token sequence; must be non-empty (an empty reference
           raises ``ZeroDivisionError``, unchanged from the original).
        h: hypothesis token sequence (may be empty).

    Returns:
        float WER percentage (can exceed 100 for long hypotheses).
    """
    # Bug fix: the original allocated the DP table with dtype=numpy.uint8,
    # which silently wraps the edit distance modulo 256 whenever a sequence
    # exceeds 255 tokens; use a wide signed integer instead.
    d = numpy.zeros(((len(r) + 1) * (len(h) + 1)), dtype=numpy.int64).reshape(((len(r) + 1), (len(h) + 1)))
    # Base cases: distance from/to the empty sequence.
    for i in range((len(r) + 1)):
        d[i][0] = i
    for j in range((len(h) + 1)):
        d[0][j] = j
    # Fill the DP table.
    for i in range(1, (len(r) + 1)):
        for j in range(1, (len(h) + 1)):
            if (r[(i - 1)] == h[(j - 1)]):
                d[i][j] = d[(i - 1)][(j - 1)]
            else:
                substitute = (d[(i - 1)][(j - 1)] + 1)
                insert = (d[i][(j - 1)] + 1)
                delete = (d[(i - 1)][j] + 1)
                d[i][j] = min(substitute, insert, delete)
    return ((float(d[len(r)][len(h)]) / len(r)) * 100)
def get_lambda(n_images, p_pixel, sigma, spec_rad):
    """Regularization weight: ``sigma * sqrt(max(n_images + 1, p_pixel)) * spec_rad``."""
    dominant_dim = np.max([(n_images + 1), p_pixel])
    return ((sigma * np.sqrt(dominant_dim)) * spec_rad)
def _compute_softmax(scores):
if (not scores):
return []
max_score = None
for score in scores:
if ((max_score is None) or (score > max_score)):
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = np.exp((score - max_score))
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append((score / total_sum))
return probs |
def show(x=None, *args, **kwargs):
    """Flush pending output, then display ``blocks(x, ...)`` when ``x`` is given."""
    flush(*args, **kwargs)
    if x is None:
        return
    display(blocks(x, *args, **kwargs))
def event_read_multiple_bytes(dat):
    """Parse a raw log byte string into a list of capnp Events.

    Writes ``dat`` to a named temp file so ``index_log`` can compute event
    boundary offsets, then slices each event out of the original buffer.
    """
    with tempfile.NamedTemporaryFile() as dat_f:
        dat_f.write(dat)
        dat_f.flush()
        idx = index_log(dat_f.name)
        # Consecutive offsets delimit one event each.
        return [capnp_log.Event.from_bytes(dat[start:end]) for (start, end) in zip(idx, idx[1:])]
def schedule(epoch, initial_learning_rate, lr_decay_start_epoch):
    """Exponential learning-rate decay beginning at ``lr_decay_start_epoch``.

    The rate is constant before the start epoch; afterwards it is scaled by
    ``exp(10 * lr * (start_epoch - epoch))``, i.e. it decays faster for
    larger base learning rates.
    """
    if epoch >= lr_decay_start_epoch:
        decay = math.exp(((10 * initial_learning_rate) * (lr_decay_start_epoch - epoch)))
        return initial_learning_rate * decay
    return initial_learning_rate
class TestKPIDataComplesAllBitwidth(KPIDataBaseTestClass):
    """KPI-data test covering every (weights, activations) bitwidth pair."""

    def run_test(self):
        model = ComplexModel()
        expected_param_sum = model.parameters_sum()
        expected_max_tensor = model.max_tensor()
        # Cartesian product of candidate bitwidths: weights x activations.
        candidates = [(w, a) for w in [8, 4, 2] for a in [8, 4, 2]]
        kpi_data = prep_test(model, candidates, large_random_datagen)
        self.verify_results(kpi_data, expected_param_sum, expected_max_tensor)
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Return a custom autograd Function implementing conv2d (or
    conv_transpose2d when ``transpose``) with replayable gradients.

    Function classes are memoized in ``_conv2d_gradfix_cache`` keyed on the
    full parameter tuple, so identical configurations share one class.
    """
    ndim = 2
    # Normalize all per-dimension arguments to 2-tuples.
    weight_shape = tuple(weight_shape)
    stride = _tuple_of_ints(stride, ndim)
    padding = _tuple_of_ints(padding, ndim)
    output_padding = _tuple_of_ints(output_padding, ndim)
    dilation = _tuple_of_ints(dilation, ndim)
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if (key in _conv2d_gradfix_cache):
        return _conv2d_gradfix_cache[key]
    # Validate arguments.
    assert (groups >= 1)
    assert (len(weight_shape) == (ndim + 2))
    assert all(((stride[i] >= 1) for i in range(ndim)))
    assert all(((padding[i] >= 0) for i in range(ndim)))
    assert all(((dilation[i] >= 0) for i in range(ndim)))
    if (not transpose):
        assert all(((output_padding[i] == 0) for i in range(ndim)))
    else:
        assert all(((0 <= output_padding[i] < max(stride[i], dilation[i])) for i in range(ndim)))
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)

    def calc_output_padding(input_shape, output_shape):
        # Output padding needed so the opposite-direction convolution used in
        # backward reproduces exactly the input spatial shape.
        if transpose:
            return [0, 0]
        return [(((input_shape[(i + 2)] - ((output_shape[(i + 2)] - 1) * stride[i])) - (1 - (2 * padding[i]))) - (dilation[i] * (weight_shape[(i + 2)] - 1))) for i in range(ndim)]

    class Conv2d(torch.autograd.Function):
        # Bug fix: torch.autograd.Function requires forward/backward to be
        # @staticmethod; without the decorator, Conv2d.apply(...) fails on
        # modern PyTorch ("legacy autograd function" error).
        @staticmethod
        def forward(ctx, input, weight, bias):
            assert (weight.shape == weight_shape)
            if (not transpose):
                output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else:
                output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight, bias)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            (input, weight, bias) = ctx.saved_tensors
            grad_input = None
            grad_weight = None
            grad_bias = None
            if ctx.needs_input_grad[0]:
                # Gradient w.r.t. input is the opposite-direction convolution.
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
                assert (grad_input.shape == input.shape)
            if (ctx.needs_input_grad[1] and (not weight_gradients_disabled)):
                grad_weight = Conv2dGradWeight.apply(grad_output, input, bias)
                assert (grad_weight.shape == weight_shape)
            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum([0, 2, 3])
            return (grad_input, grad_weight, grad_bias)

    class Conv2dGradWeight(torch.autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input, bias):
            bias_shape = (bias.shape if (bias is not None) else None)
            # Dummy weight tensor: convolution_backward only needs its shape/dtype.
            empty_weight = torch.tensor(0.0, dtype=input.dtype, device=input.device).expand(weight_shape)
            grad_weight = torch.ops.aten.convolution_backward(grad_output, input, empty_weight, bias_sizes=bias_shape, stride=stride, padding=padding, dilation=dilation, transposed=transpose, output_padding=output_padding, groups=groups, output_mask=[0, 1, 0])[1]
            assert (grad_weight.shape == weight_shape)
            ctx.save_for_backward(grad_output, input)
            return grad_weight

        @staticmethod
        def backward(ctx, grad2_grad_weight):
            (grad_output, input) = ctx.saved_tensors
            grad2_grad_output = None
            grad2_input = None
            if ctx.needs_input_grad[0]:
                grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
                assert (grad2_grad_output.shape == grad_output.shape)
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
                assert (grad2_input.shape == input.shape)
            # Bug fix: backward must return one gradient per forward input
            # (grad_output, input, bias); the original returned only two,
            # which raises at double-backward time. Bias is non-differentiable
            # here, hence None.
            return (grad2_grad_output, grad2_input, None)
    _conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
class StartupTime(Experiment):
    """Experiment that measures startup time."""

    def __init__(self, config: ExperimentConfig):
        super().__init__(config)

    # Bug fix: these identifier helpers take no ``self``, so as plain methods
    # they were only callable via the class (instance calls raised TypeError);
    # @staticmethod makes both call forms work.
    @staticmethod
    def name() -> str:
        return 'startup-time'

    @staticmethod
    def typename() -> str:
        return 'Experiment.StartupTime'
def validate_eu_eic(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate EU EIC codes in ``df``.

    Accepts a single value, a pandas/dask Series, or a DataFrame. For a
    DataFrame, ``column`` selects one column; otherwise every cell is
    checked. Returns booleans with the matching shape.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(eic.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(eic.is_valid)
        return df.applymap(eic.is_valid)
    # Scalar input, e.g. a single code string.
    return eic.is_valid(df)
def _best_version(fields):
    """Detect the best PKG-INFO metadata version for the given field dict.

    Starts from all candidate versions and eliminates those whose field
    catalogue does not contain some used key; version-specific marker
    fields then disambiguate among the survivors.

    Raises:
        MetadataConflictError: when no version matches, or the keys mix
            incompatible 1.1/1.2/2.0/2.1 marker fields.
    """
    def _has_marker(keys, markers):
        # True when any version-specific marker field is present in keys.
        for marker in markers:
            if (marker in keys):
                return True
        return False
    # Collect the names of fields that actually carry a value.
    keys = []
    for (key, value) in fields.items():
        if (value in ([], 'UNKNOWN', None)):
            continue
        keys.append(key)
    possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1']
    # Eliminate every version whose field set does not include some used key.
    for key in keys:
        if ((key not in _241_FIELDS) and ('1.0' in possible_versions)):
            possible_versions.remove('1.0')
            logger.debug('Removed 1.0 due to %s', key)
        if ((key not in _314_FIELDS) and ('1.1' in possible_versions)):
            possible_versions.remove('1.1')
            logger.debug('Removed 1.1 due to %s', key)
        if ((key not in _345_FIELDS) and ('1.2' in possible_versions)):
            possible_versions.remove('1.2')
            logger.debug('Removed 1.2 due to %s', key)
        if ((key not in _566_FIELDS) and ('1.3' in possible_versions)):
            possible_versions.remove('1.3')
            logger.debug('Removed 1.3 due to %s', key)
        if ((key not in _566_FIELDS) and ('2.1' in possible_versions)):
            # A bare 'Description' key alone does not rule out 2.1.
            if (key != 'Description'):
                possible_versions.remove('2.1')
                logger.debug('Removed 2.1 due to %s', key)
        if ((key not in _426_FIELDS) and ('2.0' in possible_versions)):
            possible_versions.remove('2.0')
            logger.debug('Removed 2.0 due to %s', key)
    # A single survivor wins outright; zero survivors is a conflict.
    if (len(possible_versions) == 1):
        return possible_versions[0]
    elif (len(possible_versions) == 0):
        logger.debug('Out of options - unknown metadata set: %s', fields)
        raise MetadataConflictError('Unknown metadata set')
    # Several candidates remain: use marker fields to decide.
    is_1_1 = (('1.1' in possible_versions) and _has_marker(keys, _314_MARKERS))
    is_1_2 = (('1.2' in possible_versions) and _has_marker(keys, _345_MARKERS))
    is_2_1 = (('2.1' in possible_versions) and _has_marker(keys, _566_MARKERS))
    is_2_0 = (('2.0' in possible_versions) and _has_marker(keys, _426_MARKERS))
    if ((((int(is_1_1) + int(is_1_2)) + int(is_2_1)) + int(is_2_0)) > 1):
        raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields')
    # No marker matched: fall back to the preferred version if still viable.
    if ((not is_1_1) and (not is_1_2) and (not is_2_1) and (not is_2_0)):
        if (PKG_INFO_PREFERRED_VERSION in possible_versions):
            return PKG_INFO_PREFERRED_VERSION
    if is_1_1:
        return '1.1'
    if is_1_2:
        return '1.2'
    if is_2_1:
        return '2.1'
    return '2.0'
def test_replace_ref_nodes_with_names_dicts():
    """Dict-valued node references should be replaced by referenced node names."""
    class Model(optplan.ProblemGraphNode):
        type = types.StringType(default='Model')
        value = types.DictType(optplan.ReferenceType(optplan.ProblemGraphNode))
    child_a = ModelB(name='m1', int_field=1)
    child_b = ModelB(name='m2', int_field=2)
    parent = Model(name='m3', value={'1': child_a, '2': child_b})
    io._replace_ref_nodes_with_names(parent, [child_a, child_b, parent])
    # Each dict entry now holds the referenced node's name, not the node.
    assert (parent.value == {'1': child_a.name, '2': child_b.name})
class WebcamFaceDetector():
    """Run face detection on a live webcam feed and draw the results."""

    def __init__(self, device=torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))):
        print('loading ...')
        self.detector = FaceDetector(face_size=(224, 224), device=device)

    def run(self, camera_index=0):
        """Show the annotated camera stream until 'q' is pressed."""
        cap = cv2.VideoCapture(camera_index)
        cap.set(3, 1280)  # frame width
        cap.set(4, 720)   # frame height
        print('type q for exit')
        while cap.isOpened():
            (ok, frame) = cap.read()
            if not ok:
                # Suggest the other common camera index in the error message.
                raise Exception(('the camera not recognized: change camera_index param to ' + str((0 if (camera_index == 1) else 1))))
            (faces, boxes, scores, landmarks) = self.detector.detect_align(frame)
            if (len(faces.shape) > 1):
                # At least one face was found: annotate every detection.
                for (i, box) in enumerate(boxes):
                    special_draw(frame, box, landmarks[i], name='face', score=scores[i])
            cv2.imshow('frame', frame)
            if ((cv2.waitKey(1) & 255) == ord('q')):
                break
        cap.release()
        cv2.destroyAllWindows()
def eliminate_existential_quantifiers_from_conditional_effects(task):
    """Compile away existential quantifiers in conditional-effect conditions.

    For every effect whose condition is an ``ExistentialCondition``, the
    quantified variables are promoted to effect parameters and the condition
    is replaced by the quantifier's body. ``task`` is modified in place.
    """
    for action in task.actions:
        for effect in action.effects:
            cond = effect.condition
            if not isinstance(cond, pddl.ExistentialCondition):
                continue
            effect.parameters = list(effect.parameters) + list(cond.parameters)
            effect.condition = cond.parts[0]
def train(args, train_dataset, model, tokenizer):
    """Fine-tune ``model`` on ``train_dataset`` with a masked-LM objective.

    Supports single-GPU, DataParallel, DistributedDataParallel, gradient
    accumulation, and optional apex fp16. Logs to TensorBoard and saves
    checkpoints periodically (main process only).

    Returns:
        (global_step, mean training loss per optimizer step).
    """
    # TensorBoard writer only on the main process.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = TableLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Either run a fixed number of optimizer steps, or derive t_total from epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # No weight decay for biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU / distributed wrapping.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', len(train_dataset))
    logger.info(' Num Epochs = %d', args.num_train_epochs)
    logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info(' Total optimization steps = %d', t_total)
    global_step = 0
    (tr_loss, logging_loss) = (0.0, 0.0)
    # NOTE(review): assumes the model is already wrapped (has ``.module``) —
    # this line would fail for a bare single-GPU model; confirm with callers.
    model.module.resize_token_embeddings(len(tokenizer))
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            (input_tok, input_type, input_mask, labels) = batch
            input_tok = input_tok.to(args.device)
            input_type = input_type.to(args.device)
            input_mask = input_mask.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = model(input_ids=input_tok, attention_mask=input_mask, token_type_ids=input_type, masked_lm_labels=labels)
            loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Step the optimizer only every gradient_accumulation_steps batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic logging (and optional mid-training evaluation).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                # Periodic checkpointing with rotation.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    checkpoint_prefix = 'checkpoint'
                    output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    _rotate_checkpoints(args, checkpoint_prefix)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
def eval_mon_op(args):
    """Evaluate a monadic (negation) operation over string booleans.

    ``args[1]`` is either the literal 'True'/'False' or a variable name that
    is resolved through the module-level ``vars`` mapping (NOTE(review):
    that global shadows the ``vars`` builtin — confirm it is defined in
    this module). Returns the negated value as a string.
    """
    operand = args[1]
    if operand not in ('True', 'False'):
        operand = vars[operand]
    return 'False' if (operand == 'True') else 'True'
def launch_ec2(params_list, exp_prefix, docker_image, code_full_path, python_command='python', script='scripts/run_experiment.py', aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False, sync_s3_png=False, sync_s3_log=False, sync_log_on_termination=True, periodic_sync=True, periodic_sync_interval=15):
    """Launch one EC2 instance (on-demand or spot) that runs the given
    experiments inside Docker.

    Builds a bash user-data script that tags the instance, pulls the Docker
    image, syncs code from S3, runs each experiment in ``params_list`` with
    periodic S3 log syncing, and optionally terminates the machine when
    done. With ``dry=True`` the request is only printed, not submitted.
    """
    if (len(params_list) == 0):
        return
    # Merge caller-provided AWS settings over the project defaults.
    default_config = dict(image_id=config.AWS_IMAGE_ID, instance_type=config.AWS_INSTANCE_TYPE, key_name=config.AWS_KEY_NAME, spot=config.AWS_SPOT, spot_price=config.AWS_SPOT_PRICE, iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME, security_groups=config.AWS_SECURITY_GROUPS, security_group_ids=config.AWS_SECURITY_GROUP_IDS, network_interfaces=config.AWS_NETWORK_INTERFACES)
    if (aws_config is None):
        aws_config = dict()
    aws_config = dict(default_config, **aws_config)
    # Assemble the user-data bash script piece by piece.
    sio = StringIO()
    sio.write('#!/bin/bash\n')
    sio.write('{\n')
    sio.write('\n die() { status=$1; shift; echo "FATAL: $*"; exit $status; }\n ')
    sio.write('\n EC2_INSTANCE_ID="`wget -q -O - ')
    sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n '.format(exp_name=params_list[0].get('exp_name'), aws_region=config.AWS_REGION_NAME))
    if config.LABEL:
        sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}\n '.format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))
    sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}\n '.format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))
    sio.write('\n service docker start\n ')
    sio.write('\n docker --config /home/ubuntu/.docker pull {docker_image}\n '.format(docker_image=docker_image))
    sio.write('\n export AWS_DEFAULT_REGION={aws_region}\n '.format(aws_region=config.AWS_REGION_NAME))
    # Pull the experiment code from S3 (tarball fast path or recursive copy).
    if config.FAST_CODE_SYNC:
        sio.write('\n aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
        sio.write('\n mkdir -p {local_code_path}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
        sio.write('\n tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
    else:
        sio.write('\n aws s3 cp --recursive {code_full_path} {local_code_path}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
        s3_mujoco_key_path = (config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/')
        sio.write('\n aws s3 cp --recursive {} {}\n '.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH))
    sio.write('\n cd {local_code_path}\n '.format(local_code_path=config.DOCKER_CODE_DIR))
    # Emit the per-experiment commands.
    for params in params_list:
        log_dir = params.get('log_dir')
        remote_log_dir = params.pop('remote_log_dir')
        env = params.pop('env', None)
        sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n '.format(exp_name=params.get('exp_name'), aws_region=config.AWS_REGION_NAME))
        sio.write('\n mkdir -p {log_dir}\n '.format(log_dir=log_dir))
        # Background loop that periodically syncs selected artifacts to S3.
        if periodic_sync:
            include_png = (" --include '*.png' " if sync_s3_png else ' ')
            include_pkl = (" --include '*.pkl' " if sync_s3_pkl else ' ')
            include_log = (" --include '*.log' " if sync_s3_log else ' ')
            sio.write("\n while /bin/true; do\n aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir}\n sleep {periodic_sync_interval}\n done & echo sync initiated".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log, log_dir=log_dir, remote_log_dir=remote_log_dir, periodic_sync_interval=periodic_sync_interval))
        # Background loop that uploads logs when a spot instance is marked for termination.
        if sync_log_on_termination:
            sio.write('\n while /bin/true; do\n if [ -z $(curl -Is | head -1 | grep 404 | cut -d \\ -f 2) ]\n then\n logger "Running shutdown hook."\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log\n aws s3 cp --recursive {log_dir} {remote_log_dir}\n break\n else\n # Spot instance not yet marked for termination.\n sleep 5\n fi\n done & echo log sync initiated\n '.format(log_dir=log_dir, remote_log_dir=remote_log_dir))
        if use_gpu:
            sio.write('\n for i in {1..800}; do su -c "nvidia-modprobe -u -c=0" ubuntu && break || sleep 3; done\n systemctl start nvidia-docker\n ')
        sio.write('\n {command}\n '.format(command=to_docker_command(params, docker_image, python_command=python_command, script=script, use_gpu=use_gpu, env=env, local_code_dir=config.DOCKER_CODE_DIR)))
        # Final sync of logs and stdout after the experiment finishes.
        sio.write('\n aws s3 cp --recursive {log_dir} {remote_log_dir}\n '.format(log_dir=log_dir, remote_log_dir=remote_log_dir))
        sio.write('\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log\n '.format(remote_log_dir=remote_log_dir))
    if terminate_machine:
        sio.write('\n EC2_INSTANCE_ID="`wget -q -O - || die "wget instance-id has failed: $?"`"\n aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}\n '.format(aws_region=config.AWS_REGION_NAME))
    sio.write('} >> /home/ubuntu/user_data.log 2>&1\n')
    full_script = dedent(sio.getvalue())
    import boto3
    import botocore
    if aws_config['spot']:
        ec2 = boto3.client('ec2', region_name=config.AWS_REGION_NAME, aws_access_key_id=config.AWS_ACCESS_KEY, aws_secret_access_key=config.AWS_ACCESS_SECRET)
    else:
        ec2 = boto3.resource('ec2', region_name=config.AWS_REGION_NAME, aws_access_key_id=config.AWS_ACCESS_KEY, aws_secret_access_key=config.AWS_ACCESS_SECRET)
    # User data is size-limited; upload big scripts to S3 and bootstrap them.
    if ((len(full_script) > 10000) or (len(base64.b64encode(full_script.encode()).decode('utf-8')) > 10000)):
        s3_path = upload_file_to_s3(full_script)
        sio = StringIO()
        sio.write('#!/bin/bash\n')
        sio.write('\n aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\\n chmod +x /home/ubuntu/remote_script.sh && \\\n bash /home/ubuntu/remote_script.sh\n '.format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
        user_data = dedent(sio.getvalue())
    else:
        user_data = full_script
    print(full_script)
    with open('/tmp/full_script', 'w') as f:
        f.write(full_script)
    instance_args = dict(ImageId=aws_config['image_id'], KeyName=aws_config['key_name'], UserData=user_data, InstanceType=aws_config['instance_type'], EbsOptimized=config.EBS_OPTIMIZED, SecurityGroups=aws_config['security_groups'], SecurityGroupIds=aws_config['security_group_ids'], NetworkInterfaces=aws_config['network_interfaces'], IamInstanceProfile=dict(Name=aws_config['iam_instance_profile_name']), **config.AWS_EXTRA_CONFIGS)
    # Security groups conflict with explicit network interfaces; drop them.
    if (len(instance_args['NetworkInterfaces']) > 0):
        disable_security_group = True
        if disable_security_group:
            instance_args.pop('SecurityGroups')
            instance_args.pop('SecurityGroupIds')
    if (aws_config.get('placement', None) is not None):
        instance_args['Placement'] = aws_config['placement']
    if (not aws_config['spot']):
        instance_args['MinCount'] = 1
        instance_args['MaxCount'] = 1
    print('')
    print(instance_args['UserData'])
    print('')
    if aws_config['spot']:
        # Spot launch specs require base64-encoded user data.
        instance_args['UserData'] = base64.b64encode(instance_args['UserData'].encode()).decode('utf-8')
        spot_args = dict(DryRun=dry, InstanceCount=1, LaunchSpecification=instance_args, SpotPrice=aws_config['spot_price'])
        import pprint
        pprint.pprint(spot_args)
        if (not dry):
            response = ec2.request_spot_instances(**spot_args)
            print(response)
            spot_request_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']
            # Tagging can race the request's visibility; retry a few times.
            for _ in range(10):
                try:
                    ec2.create_tags(Resources=[spot_request_id], Tags=[{'Key': 'Name', 'Value': params_list[0]['exp_name']}])
                    break
                except botocore.exceptions.ClientError:
                    continue
    else:
        import pprint
        pprint.pprint(instance_args)
        ec2.create_instances(DryRun=dry, **instance_args)
def accuracy(logits, labels):
    """Fraction of predictions matching ``labels``.

    2-D ``logits`` are argmax-ed along axis 1; 1-D ``logits`` are treated
    as already-predicted labels. ``labels`` must be a numpy array of the
    same length.
    """
    assert (len(logits) == len(labels))
    if len(np.shape(logits)) > 1:
        predictions = np.argmax(logits, axis=1)
    else:
        assert (len(np.shape(logits)) == 1)
        predictions = logits
    n_correct = np.sum((predictions == labels.reshape(len(labels))))
    return float(n_correct) / len(labels)
def reducible_primes_naive(E, max_l=None, num_P=None, verbose=False):
    """Filter the primes up to ``max_l`` for ``E`` via the Frobenius filter.

    ``max_l`` defaults to 1000 and ``num_P`` (the number of auxiliary
    primes used by the filter) to 100; ``verbose`` prints progress.
    """
    max_l = 1000 if (max_l is None) else max_l
    num_P = 100 if (num_P is None) else num_P
    if verbose:
        print('E = {}, finding reducible primes up to {} using Frobenius filter with {} primes'.format(E.ainvs(), max_l, num_P))
    B = Frobenius_filter(E, primes(max_l), num_P)
    if verbose:
        print('... returning {}'.format(B))
    return B
class BaseNetwork(nn.Module):
    """Common base for networks: parameter counting and weight initialization."""

    def __init__(self):
        super(BaseNetwork, self).__init__()

    def print_network(self):
        """Print the total parameter count (in millions)."""
        num_params = sum(param.numel() for param in self.parameters())
        print('Network [{}] was created. Total number of parameters: {:.1f} million. To see the architecture, do print(network).'.format(self.__class__.__name__, (num_params / 1000000)))

    def init_weights(self, init_type='normal', gain=0.02):
        """Initialize all Conv/Linear/BatchNorm2d submodules.

        init_type is one of 'normal', 'xavier', 'xavier_uniform', 'kaiming',
        'orthogonal', or 'none' (use the module's own reset_parameters);
        any other value raises NotImplementedError. Biases are zeroed.
        """
        def init_func(m):
            cls_name = m.__class__.__name__
            if 'BatchNorm2d' in cls_name:
                if hasattr(m, 'weight') and (m.weight is not None):
                    init.normal_(m.weight.data, 1.0, gain)
                if hasattr(m, 'bias') and (m.bias is not None):
                    init.constant_(m.bias.data, 0.0)
                return
            if (('Conv' in cls_name) or ('Linear' in cls_name)) and hasattr(m, 'weight'):
                if init_type == 'normal':
                    init.normal_(m.weight.data, 0.0, gain)
                elif init_type == 'xavier':
                    init.xavier_normal_(m.weight.data, gain=gain)
                elif init_type == 'xavier_uniform':
                    init.xavier_uniform_(m.weight.data, gain=1.0)
                elif init_type == 'kaiming':
                    init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif init_type == 'orthogonal':
                    init.orthogonal_(m.weight.data, gain=gain)
                elif init_type == 'none':
                    # Defer to the module's own default initialization.
                    m.reset_parameters()
                else:
                    raise NotImplementedError("initialization method '{}' is not implemented".format(init_type))
                if hasattr(m, 'bias') and (m.bias is not None):
                    init.constant_(m.bias.data, 0.0)
        self.apply(init_func)

    def forward(self, *inputs):
        # Subclasses override; the base implementation is a no-op.
        pass
class CategoricalColumnWithVocabularyList(CategoricalColumnTransformer):
    """Map a categorical column's values to integer ids via a vocabulary list."""

    def __init__(self, key, vocabulary_list):
        self.key = key
        self.vocabulary_list = vocabulary_list

    def _set_feature_column_names(self, names):
        # Record the names, then cache our column's index for __call__.
        CategoricalColumnTransformer._set_feature_column_names(self, names)
        self.column_idx = self.names.index(self.key)

    def get_feature_column_names(self):
        return [self.key]

    def num_classes(self):
        return len(self.vocabulary_list)

    def __call__(self, inputs):
        to_id = (lambda value: self.vocabulary_list.index(value))

        def transform_fn(slot_value):
            # Arrays are mapped elementwise to int64 ids; scalars directly.
            if isinstance(slot_value, np.ndarray):
                return elementwise_transform(slot_value, to_id).astype(np.int64)
            return to_id(slot_value)
        return apply_transform_on_value(inputs[self.column_idx], transform_fn)
class Partition3(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:3'):
    """Build this pipeline-parallel partition stage (machine-generated code).

    layers: dict mapping LAYER_SCOPES trace names to the corresponding
        nn.Module objects extracted from the full T5 model.
    tensors: dict mapping TENSORS scope names to the parameters/buffers this
        stage owns (empty for this partition).
    device: device string this stage is placed on.
    """
    super().__init__()
    # Register each traced submodule under a positional alias l_<idx>;
    # self.lookup (below) maps the alias back to the original module path.
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    # Stage-owned tensors: nn.Parameter objects become p_<n>, others b_<n>.
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    self.device = torch.device(device)
    # forward() receives 3 flattened positional inputs (see its unflatten call).
    self.input_structure = [1, 1, 1]
    # Alias -> original (state-dict style) module name, used by the delegating
    # state_dict / named_parameters helpers.
    self.lookup = {'l_0': 'encoder.block.9.layer.0.SelfAttention.q', 'l_1': 'encoder.block.9.layer.0.SelfAttention.k', 'l_2': 'encoder.block.9.layer.0.SelfAttention.v', 'l_3': 'encoder.block.9.layer.0.SelfAttention.o', 'l_4': 'encoder.block.9.layer.0.dropout', 'l_5': 'encoder.block.9.layer.1.layer_norm', 'l_6': 'encoder.block.9.layer.1.DenseReluDense.wi', 'l_7': 'encoder.block.9.layer.1.DenseReluDense.dropout', 'l_8': 'encoder.block.9.layer.1.DenseReluDense.wo', 'l_9': 'encoder.block.9.layer.1.dropout', 'l_10': 'encoder.block.10.layer.0.layer_norm', 'l_11': 'encoder.block.10.layer.0.SelfAttention.q', 'l_12': 'encoder.block.10.layer.0.SelfAttention.k', 'l_13': 'encoder.block.10.layer.0.SelfAttention.v', 'l_14': 'encoder.block.10.layer.0.SelfAttention.o', 'l_15': 'encoder.block.10.layer.0.dropout', 'l_16': 'encoder.block.10.layer.1.layer_norm', 'l_17': 'encoder.block.10.layer.1.DenseReluDense.wi', 'l_18': 'encoder.block.10.layer.1.DenseReluDense.dropout', 'l_19': 'encoder.block.10.layer.1.DenseReluDense.wo', 'l_20': 'encoder.block.10.layer.1.dropout', 'l_21': 'encoder.block.11.layer.0.layer_norm', 'l_22': 'encoder.block.11.layer.0.SelfAttention.q', 'l_23': 'encoder.block.11.layer.0.SelfAttention.k', 'l_24': 'encoder.block.11.layer.0.SelfAttention.v', 'l_25': 'encoder.block.11.layer.0.SelfAttention.o', 'l_26': 'encoder.block.11.layer.0.dropout', 'l_27': 'encoder.block.11.layer.1.layer_norm', 'l_28': 'encoder.block.11.layer.1.DenseReluDense.wi', 'l_29': 'encoder.block.11.layer.1.DenseReluDense.dropout', 'l_30': 'encoder.block.11.layer.1.DenseReluDense.wo', 'l_31': 'encoder.block.11.layer.1.dropout'}
    self.to(self.device)
def forward(self, *args):
    """Pipeline stage covering T5 encoder blocks 9-11 (machine-generated).

    Flattened inputs: x0 — residual hidden state entering block 9's attention;
    x1 — presumably the already layer-normed hidden state (l_0..l_2 are the
    q/k/v projections and no layer_norm precedes them here — TODO confirm the
    producing stage applied it); x2 — tensor added to the raw attention scores
    (presumably position bias / extended attention mask — verify upstream).

    Returns the flattened (hidden_states, score-bias tensor) pair for the
    next stage. Temp names t_* are reused aggressively and carry no meaning;
    see self.lookup for the l_<idx> -> original module mapping.
    """
    (x0, x1, x2) = unflatten(args, self.input_structure)
    # ---- block 9: self-attention — q/k/v projections of x1 ----
    t_0 = self.l_0(x1)
    t_1 = self.l_1(x1)
    t_2 = self.l_2(x1)
    # batch size, extracted as x1.shape[:2][0]
    t_3 = x1.shape
    t_3 = t_3[slice(None, 2, None)]
    t_3 = t_3[0]
    # split into 32 heads of width 128 -> (batch, heads, seq, head_dim)
    t_0 = t_0.view(t_3, (- 1), 32, 128)
    t_0 = t_0.transpose(1, 2)
    t_1 = t_1.view(t_3, (- 1), 32, 128)
    t_1 = t_1.transpose(1, 2)
    t_2 = t_2.view(t_3, (- 1), 32, 128)
    t_2 = t_2.transpose(1, 2)
    # scores = Q @ K^T, plus the bias/mask term x2
    t_1 = t_1.transpose(3, 2)
    t_1 = torch.matmul(t_0, t_1)
    t_1 += x2
    # softmax in float32 for numerical stability, then cast back
    t_0 = t_1.float()
    t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None)
    t_1 = t_0.type_as(t_1)
    t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False)
    # context = attn @ V; merge heads back to model dim 4096, project (l_3),
    # dropout (l_4), add residual x0
    t_2 = torch.matmul(t_1, t_2)
    t_2 = t_2.transpose(1, 2)
    t_2 = t_2.contiguous()
    t_3 = t_2.view(t_3, (- 1), 4096)
    t_3 = self.l_3(t_3)
    t_2 = self.l_4(t_3)
    t_2 = (x0 + t_2)
    # Replicate the traced T5 layer-output tuple plumbing:
    # (hidden_states, key_value_state, score-bias) then re-head with residual.
    t_3 = (t_3, None, x2)
    t_1 = t_3[0]
    t_2 = (t_2,)
    t_3 = t_3[slice(1, None, None)]
    t_3 = (t_2 + t_3)
    # ---- block 9: feed-forward (l_5 layer_norm, l_6 wi, relu, l_7 dropout,
    # l_8 wo, l_9 dropout) with residual ----
    t_2 = t_3[slice(None, 2, None)]
    t_0 = t_2[0]
    t_4 = self.l_5(t_0)
    t_2 = t_2[1]
    t_3 = t_3[slice(2, None, None)]
    t_4 = self.l_6(t_4)
    t_4 = torch.nn.functional.relu(t_4, inplace=False)
    t_4 = self.l_7(t_4)
    t_4 = self.l_8(t_4)
    t_4 = self.l_9(t_4)
    t_4 = (t_0 + t_4)
    t_2 = (t_4, t_2)
    t_3 = (t_2 + t_3)
    # ---- block 10: layer_norm (l_10) + self-attention (l_11..l_15) ----
    t_2 = t_3[slice(None, 2, None)]
    t_2 = t_2[0]
    t_4 = self.l_10(t_2)
    t_3 = t_3[2]
    t_0 = self.l_11(t_4)
    t_5 = self.l_12(t_4)
    t_6 = self.l_13(t_4)
    t_4 = t_4.shape
    t_4 = t_4[slice(None, 2, None)]
    t_4 = t_4[0]
    t_0 = t_0.view(t_4, (- 1), 32, 128)
    t_0 = t_0.transpose(1, 2)
    t_5 = t_5.view(t_4, (- 1), 32, 128)
    t_5 = t_5.transpose(1, 2)
    t_6 = t_6.view(t_4, (- 1), 32, 128)
    t_6 = t_6.transpose(1, 2)
    t_5 = t_5.transpose(3, 2)
    t_5 = torch.matmul(t_0, t_5)
    t_5 += t_3
    t_0 = t_5.float()
    t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None)
    t_5 = t_0.type_as(t_5)
    t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False)
    t_6 = torch.matmul(t_5, t_6)
    t_6 = t_6.transpose(1, 2)
    t_6 = t_6.contiguous()
    t_4 = t_6.view(t_4, (- 1), 4096)
    t_4 = self.l_14(t_4)
    t_6 = self.l_15(t_4)
    t_6 = (t_2 + t_6)
    t_3 = (t_4, None, t_3)
    t_4 = t_3[0]
    t_6 = (t_6,)
    t_3 = t_3[slice(1, None, None)]
    t_3 = (t_6 + t_3)
    # ---- block 10: feed-forward (l_16..l_20) with residual ----
    t_6 = t_3[slice(None, 2, None)]
    t_2 = t_6[0]
    t_5 = self.l_16(t_2)
    t_6 = t_6[1]
    t_3 = t_3[slice(2, None, None)]
    t_5 = self.l_17(t_5)
    t_5 = torch.nn.functional.relu(t_5, inplace=False)
    t_5 = self.l_18(t_5)
    t_5 = self.l_19(t_5)
    t_5 = self.l_20(t_5)
    t_5 = (t_2 + t_5)
    t_6 = (t_5, t_6)
    t_3 = (t_6 + t_3)
    # ---- block 11: layer_norm (l_21) + self-attention (l_22..l_26) ----
    t_6 = t_3[slice(None, 2, None)]
    t_6 = t_6[0]
    t_5 = self.l_21(t_6)
    t_3 = t_3[2]
    t_2 = self.l_22(t_5)
    t_0 = self.l_23(t_5)
    t_7 = self.l_24(t_5)
    t_5 = t_5.shape
    t_5 = t_5[slice(None, 2, None)]
    t_5 = t_5[0]
    t_2 = t_2.view(t_5, (- 1), 32, 128)
    t_2 = t_2.transpose(1, 2)
    t_0 = t_0.view(t_5, (- 1), 32, 128)
    t_0 = t_0.transpose(1, 2)
    t_7 = t_7.view(t_5, (- 1), 32, 128)
    t_7 = t_7.transpose(1, 2)
    t_0 = t_0.transpose(3, 2)
    t_0 = torch.matmul(t_2, t_0)
    t_0 += t_3
    t_2 = t_0.float()
    t_2 = torch.nn.functional.softmax(t_2, dim=(- 1), _stacklevel=3, dtype=None)
    t_0 = t_2.type_as(t_0)
    t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
    t_7 = torch.matmul(t_0, t_7)
    t_7 = t_7.transpose(1, 2)
    t_7 = t_7.contiguous()
    t_5 = t_7.view(t_5, (- 1), 4096)
    t_5 = self.l_25(t_5)
    t_7 = self.l_26(t_5)
    t_7 = (t_6 + t_7)
    t_3 = (t_5, None, t_3)
    t_5 = t_3[0]
    t_7 = (t_7,)
    t_3 = t_3[slice(1, None, None)]
    t_3 = (t_7 + t_3)
    # ---- block 11: feed-forward (l_27..l_31) with residual ----
    t_7 = t_3[slice(None, 2, None)]
    t_6 = t_7[0]
    t_0 = self.l_27(t_6)
    t_7 = t_7[1]
    t_3 = t_3[slice(2, None, None)]
    t_0 = self.l_28(t_0)
    t_0 = torch.nn.functional.relu(t_0, inplace=False)
    t_0 = self.l_29(t_0)
    t_0 = self.l_30(t_0)
    t_0 = self.l_31(t_0)
    t_0 = (t_6 + t_0)
    t_7 = (t_0, t_7)
    t_3 = (t_7 + t_3)
    # Final outputs for the next stage: hidden states + score-bias tensor.
    t_7 = t_3[slice(None, 2, None)]
    t_7 = t_7[0]
    t_3 = t_3[2]
    return list(flatten((t_7, t_3)))
def state_dict(self, *args, **kwargs):
    # Delegates to the module-level `state_dict` helper (defined elsewhere in
    # this generated file); the call resolves to the global, not recursion.
    return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
    # Delegates to the module-level `load_state_dict` helper defined elsewhere
    # in this generated file (global lookup, not recursion).
    return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
    # Delegates to the module-level `named_parameters` helper defined elsewhere
    # in this generated file (global lookup, not recursion).
    return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
    # Delegates to the module-level `named_buffers` helper defined elsewhere
    # in this generated file (global lookup, not recursion).
    return named_buffers(self, *args, **kwargs)
def cpu(self):
    # Delegates to the module-level `cpu` helper defined elsewhere in this
    # generated file.
    return cpu(self)
def cuda(self, device=None):
    # Delegates to the module-level `cuda` helper defined elsewhere in this
    # generated file.
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegates to the module-level `to` helper defined elsewhere in this
    # generated file.
    return to(self, *args, **kwargs)
def TrainNet(Unet_chi, Unet_lfs, LR=0.001, Batchsize=32, Epoches=100, useGPU=True):
    """Jointly train the chi and lfs U-Nets with their Laplacian input layers.

    Total loss = MSE(chi) + MSE(lfs) + 0.1 * dipole data-fidelity between the
    two predictions, all masked via `masks`. Checkpoints every 20 epochs and
    at the end via SaveNet.

    NOTE(review): the block nesting below was reconstructed from a
    whitespace-mangled source — confirm against the original file.
    NOTE(review): the LR argument is never used; all four Adam optimizers run
    with their library-default learning rate. If useGPU is False, nothing is
    trained but 'Training Ends' is still printed and untrained nets saved.
    """
    print('IniReconNet')
    print('DataLoad')
    trainloader = DataLoad(Batchsize)
    print('Dataload Ends')
    print('Training Begins')
    criterion = nn.MSELoss(reduction='sum')
    optimizer1 = optim.Adam(Unet_chi.parameters())
    optimizer2 = optim.Adam(Unet_lfs.parameters())
    scheduler1 = LS.MultiStepLR(optimizer1, milestones=[50, 80], gamma=0.1)
    scheduler2 = LS.MultiStepLR(optimizer2, milestones=[50, 80], gamma=0.1)
    # Dipole kernel D, used by the data-fidelity term (loss3).
    matD = scio.loadmat('Dipole_128.mat', verify_compressed_data_integrity=False)
    D = matD['D']
    D = np.array(D)
    D = torch.from_numpy(D)
    D = D.float()
    # 3D Laplacian stencil, unsqueezed twice to conv-kernel layout (1, 1, ...).
    LGOP = scio.loadmat('3D_Laplacian_Operator.mat', verify_compressed_data_integrity=False)
    conv_op = LGOP['LM']
    conv_op = np.array(conv_op)
    conv_op = torch.from_numpy(conv_op)
    conv_op = conv_op.float()
    conv_op = torch.unsqueeze(conv_op, 0)
    conv_op = torch.unsqueeze(conv_op, 0)
    LPLayer_chi = LapLayer(conv_op)
    optimizer3 = optim.Adam(LPLayer_chi.parameters())
    scheduler3 = LS.MultiStepLR(optimizer3, milestones=[50, 80], gamma=0.1)
    LPLayer_lfs = LapLayer(conv_op)
    optimizer4 = optim.Adam(LPLayer_lfs.parameters())
    scheduler4 = LS.MultiStepLR(optimizer4, milestones=[50, 80], gamma=0.1)
    time_start = time.time()
    if useGPU:
        if torch.cuda.is_available():
            print(torch.cuda.device_count(), 'Available GPUs!')
            device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
            Unet_chi = nn.DataParallel(Unet_chi)
            Unet_chi.to(device)
            Unet_lfs = nn.DataParallel(Unet_lfs)
            Unet_lfs.to(device)
            LPLayer_chi = nn.DataParallel(LPLayer_chi)
            LPLayer_chi.to(device)
            LPLayer_lfs = nn.DataParallel(LPLayer_lfs)
            LPLayer_lfs.to(device)
            D = D.to(device)
            for epoch in range(1, (Epoches + 1)):
                # Periodic checkpoint every 20 epochs.
                if ((epoch % 20) == 0):
                    SaveNet(Unet_chi, Unet_lfs, LPLayer_chi, LPLayer_lfs, epoch, enSave=False)
                # NOTE(review): acc_loss is assigned but never accumulated/used.
                acc_loss = 0.0
                for (i, data) in enumerate(trainloader):
                    (wphs, chis, lfss, TEs, masks, name) = data
                    wphs = wphs.to(device)
                    chis = chis.to(device)
                    lfss = lfss.to(device)
                    masks = masks.to(device)
                    TEs = TEs.to(device)
                    optimizer1.zero_grad()
                    optimizer2.zero_grad()
                    optimizer3.zero_grad()
                    optimizer4.zero_grad()
                    # Laplacian layers derive the two input pairs for the U-Nets.
                    (b_i, d_i) = LPLayer_chi(wphs, masks, TEs)
                    (a_i, c_i) = LPLayer_lfs(wphs, masks, TEs)
                    pred_chi = Unet_chi(b_i, d_i)
                    pred_chi = (pred_chi / 4)
                    pred_lfs = Unet_lfs(a_i, c_i)
                    pred_lfs = (pred_lfs / 4)
                    loss1 = criterion((pred_chi * masks), (chis * masks))
                    loss2 = criterion((pred_lfs * masks), (lfss * masks))
                    # Physics consistency: dipole-convolved chi should match lfs.
                    loss3 = criterion((DataFidelity(pred_chi, D) * masks), (pred_lfs * masks))
                    loss = ((loss1 + loss2) + (0.1 * loss3))
                    loss.backward()
                    optimizer1.step()
                    optimizer2.step()
                    optimizer3.step()
                    optimizer4.step()
                    optimizer1.zero_grad()
                    optimizer2.zero_grad()
                    optimizer3.zero_grad()
                    optimizer4.zero_grad()
                    # Log every 19th batch.
                    if ((i % 19) == 0):
                        acc_loss1 = loss1.item()
                        acc_loss2 = loss2.item()
                        acc_loss3 = loss3.item()
                        time_end = time.time()
                        # NOTE(review): the field labeled 'loss:' actually prints loss2.
                        print(('Outside: Epoch : %d, batch: %d, Loss1: %f, loss: %f, loss3: %f, lr1: %f, used time: %d s' % (epoch, (i + 1), acc_loss1, acc_loss2, acc_loss3, optimizer1.param_groups[0]['lr'], (time_end - time_start))))
                # LR decay at epochs 50 and 80 (see MultiStepLR milestones).
                scheduler1.step()
                scheduler2.step()
                scheduler3.step()
                scheduler4.step()
        else:
            pass
            print('No Cuda Device!')
            quit()
    print('Training Ends')
    SaveNet(Unet_chi, Unet_lfs, LPLayer_chi, LPLayer_lfs, Epoches)
class Cutout(DauphinTransform):
    """Cutout augmentation: paint one randomly placed square patch of the
    image with a constant fill color.

    The patch edge length is derived from ``level``, mapped into
    ``(0, max_pixel)`` by ``categorize_value``.
    """

    def __init__(self, name=None, prob=1.0, level=0, max_pixel=20, color=None):
        # max_pixel: upper bound (pixels) for the cutout edge length.
        # color: explicit fill value; if None, a mode-dependent default is used.
        self.max_pixel = max_pixel
        self.value_range = (0, self.max_pixel)
        self.color = color
        super().__init__(name, prob, level)

    def transform(self, pil_img, label, **kwargs):
        """Return (augmented_image, label); the input image is not mutated."""
        pil_img = pil_img.copy()
        # Patch edge length for this application, derived from self.level.
        degree = categorize_value(self.level, self.value_range, 'int')
        (width, height) = pil_img.size
        # Sample the patch center uniformly, then clamp the box to the image.
        x0 = np.random.uniform(width)
        y0 = np.random.uniform(height)
        x0 = int(max(0, (x0 - (degree / 2.0))))
        y0 = int(max(0, (y0 - (degree / 2.0))))
        x1 = min(width, (x0 + degree))
        y1 = min(height, (y0 + degree))
        xy = (x0, y0, x1, y1)
        if (self.color is not None):
            color = self.color
        elif (pil_img.mode == 'RGB'):
            # Per-channel mean gray used by standard Cutout implementations.
            color = (125, 123, 114)
        elif (pil_img.mode == 'L'):
            color = 121
        else:
            # BUG FIX: corrected typo 'Unspported' -> 'Unsupported' in message.
            raise ValueError(f'Unsupported image mode {pil_img.mode}')
        ImageDraw.Draw(pil_img).rectangle(xy, color)
        return (pil_img, label)

    def __repr__(self):
        return f'<Transform ({self.name}), prob={self.prob}, level={self.level}, max_pixel={self.max_pixel}>'
(scope='package')
def tensor_schema():
    """Build a small TensorSchema for tests: sequential categorical item_id
    (ITEM_ID hint) and timestamp (TIMESTAMP hint), plus extra categorical and
    numerical features; cardinality 4 throughout.

    NOTE(review): the ``(scope='package')`` line above this function is a
    truncated decorator — presumably ``@pytest.fixture(scope='package')``;
    restore it from the original source.
    """
    schema = TensorSchemaBuilder().categorical('item_id', cardinality=4, is_seq=True, embedding_dim=64, feature_hint=FeatureHint.ITEM_ID).categorical('some_item_feature', cardinality=4, is_seq=True, embedding_dim=32).categorical('some_user_feature', cardinality=4, is_seq=False, embedding_dim=64).numerical('some_num_feature', tensor_dim=64, is_seq=True).categorical('timestamp', cardinality=4, is_seq=True, embedding_dim=64, feature_hint=FeatureHint.TIMESTAMP).categorical('some_cat_feature', cardinality=4, is_seq=True, embedding_dim=64).build()
    return schema
def BModel2Bin(bmodel_file):
    """Dump every command stream of a compiled BModel to raw ``.bin`` files.

    For each subnet (and, on multi-core targets, each core) the tiu/dma/sdma/
    hau/cdma command groups are written next to the input file, named
    ``<bmodel>.core(C).subnet(S).group(G).len(N).<kind>.bin``.
    """
    import math

    class FName():
        # Mutable filename template; __str__ renders the current field values.
        core_id = 0
        subnet_id = 0
        gid = 0
        length = 0
        suffix = ''

        def __str__(self):
            return (bmodel_file + f'.core({self.core_id}).subnet({self.subnet_id}).group({self.gid}).len({self.length}){self.suffix}')

    fname = FName()
    bmodel = dis.BModel(bmodel_file)
    for subnet in BModelCMDIter(bmodel):
        fname.core_id = 0
        fname.subnet_id = subnet.id
        # Legacy single-core command groups.
        for (fname.gid, cmds) in enumerate(subnet.cmd_group):
            (fname.length, fname.suffix) = (cmds.tiu_num, '.tiu.bin')
            with open(str(fname), 'wb') as f:
                f.write(bytes(cmds.tiu_cmd))
            (fname.length, fname.suffix) = (cmds.dma_num, '.dma.bin')
            with open(str(fname), 'wb') as f:
                f.write(bytes(cmds.dma_cmd))
        # Per-core command streams (multi-core targets).
        for (fname.core_id, _cmds) in enumerate(subnet.core_commands):
            for (fname.gid, cmds) in enumerate(_cmds.gdma_tiu_commands):
                (fname.length, fname.suffix) = (cmds.tiu_num, '.tiu.bin')
                with open(str(fname), 'wb') as f:
                    f.write(bytes(cmds.tiu_cmd))
                (fname.length, fname.suffix) = (cmds.dma_num, '.dma.bin')
                with open(str(fname), 'wb') as f:
                    f.write(bytes(cmds.dma_cmd))
            # Lengths below are command counts: raw bytes / per-command size.
            for (fname.gid, cmds) in enumerate(_cmds.sdma_commands):
                (fname.length, fname.suffix) = (math.ceil((len(cmds) / 96)), '.sdma.bin')
                with open(str(fname), 'wb') as f:
                    f.write(bytes(cmds))
            for (fname.gid, cmds) in enumerate(_cmds.hau_commands):
                (fname.length, fname.suffix) = (math.ceil((len(cmds) / 80)), '.hau.bin')
                with open(str(fname), 'wb') as f:
                    f.write(bytes(cmds))
            for (fname.gid, cmds) in enumerate(_cmds.cdma_commands):
                # BUG FIX: cdma groups were written with a '.hau.bin' suffix
                # (copy-paste from the hau loop above), which could collide
                # with and overwrite the hau dumps. Use '.cdma.bin'.
                (fname.length, fname.suffix) = (math.ceil((len(cmds) / 120)), '.cdma.bin')
                with open(str(fname), 'wb') as f:
                    f.write(bytes(cmds))
def check_dir(module, module_name=None):
    """Map each attribute of *module* that originates elsewhere to its
    fully qualified origin ("<module>.<name>").

    Attributes lacking __module__ or __name__, and those whose __module__
    matches *module_name* (defaults to module.__name__), are skipped.
    """
    target = module.__name__ if module_name is None else module_name
    foreign = {}
    for attr in dir(module):
        obj = getattr(module, attr)
        if not (hasattr(obj, '__module__') and hasattr(obj, '__name__')):
            continue
        if obj.__module__ != target:
            foreign[attr] = ((obj.__module__ + '.') + obj.__name__)
    return foreign
def var(key: str, *fallbacks: Optional[str], force: bool=False) -> Optional[str]:
    """Resolve configuration value for *key* and publish it.

    Resolution order: the OS environment (skipped when force=True), then the
    optional sage_conf package, then the given fallbacks in order (first
    non-None wins). The result is recorded in SAGE_ENV and bound as a
    module-level global named *key*; it is also returned (possibly None).
    """
    value = None if force else os.environ.get(key)
    if value is None:
        try:
            import sage_conf
            value = getattr(sage_conf, key, None)
        except ImportError:
            # sage_conf is optional; silently fall through to the fallbacks.
            pass
    for fallback in fallbacks:
        if value is not None:
            break
        value = fallback
    SAGE_ENV[key] = value
    globals()[key] = value
    return value
def cau_metrics(preds, labels, cutoff=20):
    """Per-sample Recall@cutoff, MRR@cutoff and NDCG@cutoff.

    preds: iterable of score vectors; labels: the target index per vector.
    The target's rank is 1 + number of items scored strictly higher.
    Returns three parallel lists (recall flags, reciprocal ranks, ndcg gains).
    """
    recall, mrr, ndcg = [], [], []
    for scores, target in zip(preds, labels):
        rank = ((scores[target] < scores).sum() + 1)
        hit = (rank <= cutoff)
        recall.append(hit)
        mrr.append((1 / rank) if hit else 0.0)
        ndcg.append((1 / np.log2((rank + 1))) if hit else 0.0)
    return (recall, mrr, ndcg)
def get_device_map(n_layers, devices):
    """Partition layer indices 0..n_layers-1 into contiguous, evenly sized
    chunks and assign one chunk per device.

    Args:
        n_layers: total number of layers to distribute.
        devices: ordered device identifiers.

    Returns:
        Dict mapping each device to its list of layer indices (the last used
        device may get a shorter chunk; surplus devices are omitted because
        zip stops at the shorter sequence). Empty dict when there is nothing
        to distribute.
    """
    # Guard degenerate inputs the original crashed on
    # (range() with step 0, or division by zero for empty devices).
    if n_layers <= 0 or not devices:
        return {}
    # Exact integer ceiling division — avoids float round-off of ceil(a / b).
    n_blocks = -(-n_layers // len(devices))
    layers = list(range(n_layers))
    layers_list = [layers[i:(i + n_blocks)] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
# NOTE(review): the line below looks like a truncated decorator — upstream
# fairseq registers this class with "@register_optimizer('adam')"; confirm
# against the original source before running.
_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
    """Adam optimizer wrapper that prefers a fused CUDA kernel when apex is
    installed, falling back to the plain Adam implementation otherwise."""

    def __init__(self, args, params):
        super().__init__(args)
        if torch.cuda.is_available():
            try:
                # Probe import: only checks that apex is installed. The
                # `FusedAdam` used below is presumably a class defined
                # elsewhere in this file (as in upstream fairseq), NOT the
                # aliased apex import — verify it exists at module level.
                from apex.optimizers import FusedAdam as _FusedAdam
                self._optimizer = FusedAdam(params, **self.optimizer_config)
            except ImportError:
                self._optimizer = Adam(params, **self.optimizer_config)
        else:
            self._optimizer = Adam(params, **self.optimizer_config)

    def add_args(parser):
        """Register Adam-specific command-line arguments on *parser*.

        NOTE(review): upstream this is a @staticmethod (note the missing
        `self`); the decorator appears to have been lost in extraction.
        """
        parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer')
        parser.add_argument('--adam-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adam optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')

    def optimizer_config(self):
        """Kwargs for constructing the underlying optimizer.

        NOTE(review): upstream this is a @property — decorator likely lost in
        extraction. eval() parses the '(0.9, 0.999)' betas string coming from
        the CLI; trusted input only.
        """
        return {'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay}

    def average_params(self):
        """Average the first/second-moment buffers across all workers:
        divide locally by world size, then SUM all-reduce."""
        state_dict = self.optimizer.state_dict()
        total_gpus = float(dist.get_world_size())
        for (_, value) in state_dict['state'].items():
            value['exp_avg'] /= total_gpus
            value['exp_avg_sq'] /= total_gpus
            dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
def im2heat(pred_dir, a, gt, exten='.png'):
    """Overlay a grayscale prediction map onto *gt* as a JET heatmap.

    Loads ``pred_dir + a + exten`` as a single-channel image, colorizes it
    with OpenCV's JET colormap (passed through `convert`), and alpha-blends
    it over *gt* using the prediction intensity (0..1) as the blend weight.
    Returns a uint8 image array.
    """
    pred_path = ((pred_dir + a) + exten)
    gray = cv2.imread(pred_path, 0)
    heat = convert(cv2.applyColorMap(gray, cv2.COLORMAP_JET))
    # Intensity -> 3-channel float alpha in [0, 1].
    alpha = np.stack((gray, gray, gray), 2).astype('float32')
    alpha = (alpha / 255.0)
    return np.uint8(((alpha * heat) + ((1.0 - alpha) * gt)))
def vector_serializer(vector):
    """Coerce a numpy array to a plain (nested) list; pass anything else
    through unchanged."""
    return vector.tolist() if isinstance(vector, numpy.ndarray) else vector
def run_subprocess_py(file_name):
    """Run *file_name* with python3, forwarding this process's CLI arguments.

    Blocks until the child exits; terminates the current process with status 1
    if the child returned a non-zero exit code. Returns None on success.
    """
    # List concatenation with an empty argv slice is a no-op, so the original
    # if/else branch on `arguments` was redundant.
    command = (['python3', file_name] + sys.argv[1:])
    # subprocess.call == Popen(...).wait(), without leaving a dangling Popen.
    return_code = subprocess.call(command)
    if (return_code != 0):
        # sys.exit instead of the site-provided exit(): works under `python -S`.
        sys.exit(1)
class DownBlock2D(nn.Module):
    """UNet down-sampling block: `num_layers` ResnetBlock2D units followed by
    an optional strided Downsample2D (diffusers-style)."""

    def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_downsample=True, downsample_padding=1):
        super().__init__()
        resnets = []
        for i in range(num_layers):
            # First resnet maps in_channels -> out_channels; the rest keep
            # out_channels. NOTE: this rebinds the `in_channels` parameter, so
            # after the loop (num_layers >= 1) `in_channels` == `out_channels`
            # — the Downsample2D construction below relies on that.
            in_channels = (in_channels if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
        self.resnets = nn.ModuleList(resnets)
        if add_downsample:
            self.downsamplers = nn.ModuleList([Downsample2D(in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')])
        else:
            self.downsamplers = None

    def forward(self, hidden_states, temb=None):
        """Apply each resnet (collecting every intermediate state for UNet
        skip connections), then the optional downsampler(s).

        Returns (final_hidden_states, tuple_of_all_intermediate_states).
        """
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb)
            output_states += (hidden_states,)
        if (self.downsamplers is not None):
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)
            # Only the final downsampled state is recorded (once, after the loop).
            output_states += (hidden_states,)
        return (hidden_states, output_states)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.