| code |
|---|
class FlowGraph(FlowGraph):
def __init__(self, flow):
self.name = flow.__name__
self.nodes = self._create_nodes(flow)
self.doc = deindent_docstring(flow.__doc__)
self._traverse_graph()
self._postprocess()
def _create_nodes(self, flow):
module = __import__(flow.__module__)
tree = ast.parse(getsource(module)).body
root = [n for n in tree if (isinstance(n, ast.ClassDef) and (n.name == self.name))][0]
nodes = {}
StepVisitor(nodes, flow).visit(root)
return nodes |
def ref_hard_tanh_backward(x, dy, **kw):
return np.array([(dy if ((- 1) <= i <= 1) else 0) for i in np.nditer(x)]) |
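# A hedged usage sketch (not part of the source): the reference backward above matches
# the elementwise definition d/dx hardtanh(x) = 1 for -1 <= x <= 1 and 0 otherwise.
import numpy as np

x = np.array([-2.0, -1.0, 0.0, 0.5, 1.0, 2.0])
dy = 1.0
expected = np.where((x >= -1) & (x <= 1), dy, 0.0)
assert np.allclose(ref_hard_tanh_backward(x, dy), expected)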
class SymmetryFinder(object):
def __init__(self, loc):
self.loc = loc
def getChildrenNonZero(self, children):
cnt = 0
for c in children:
if (c.nb != 0):
cnt += 1
return cnt
def getSymmetryConstraints(self, node, pid):
if (node.nb == 0):
return ([], [], [])
name = ('x_' + str(node.nb))
node.var = Int(name)
if (node.children != []):
(res, vars, cur_val) = ([And((self.getChildrenNonZero(node.children) < node.var), (node.var < pid))], [node.var], [(node.var != node.nb)])
for c in node.children:
(vars_aux, res_aux, cur_val_aux) = self.getSymmetryConstraints(c, node.var)
res += res_aux
vars += vars_aux
cur_val += cur_val_aux
return (vars, res, cur_val)
else:
return ([node.var], [And((1 <= node.var), (node.var < pid))], [(node.var != node.nb)])
def allDiff(self, vars, solver):
for v in range(len(vars)):
for v_1 in range((v + 1), len(vars)):
if (vars[v] is not vars[v_1]):
solver.add((vars[v] != vars[v_1]))
def findSymmetries(self, last_line):
models = []
constraints = []
all_vars = []
current_values = []
for c in last_line.children:
if (c.nb == 0):
continue
(vars_aux, constraints_aux, cur_val) = self.getSymmetryConstraints(c, last_line.nb)
all_vars += vars_aux
constraints += constraints_aux
current_values += cur_val
sym_solver = Solver()
sym_solver.add(Or(current_values))
sym_solver.add(constraints)
self.allDiff(all_vars, sym_solver)
return getModels(sym_solver, all_vars) |
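# Hedged sketch (assumes the z3-solver package): allDiff above is the classic
# all-different encoding via pairwise disequalities, e.g.:
from z3 import Int, Solver, sat

xs = [Int('d_%d' % i) for i in range(3)]
s = Solver()
for i in range(len(xs)):
    for j in range(i + 1, len(xs)):
        s.add(xs[i] != xs[j])
assert s.check() == sat  # a model assigns three distinct integers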
def _trim_arity(func, maxargs=2):
if (func in singleArgBuiltins):
return (lambda s, l, t: func(t))
limit = [0]
foundArity = [False]
if (system_version[:2] >= (3, 5)):
def extract_stack(limit=0):
offset = ((- 3) if (system_version == (3, 5, 0)) else (- 2))
frame_summary = traceback.extract_stack(limit=(((- offset) + limit) - 1))[offset]
return [frame_summary[:2]]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[(- 1)]
return [frame_summary[:2]]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
LINE_DIFF = 6
this_line = extract_stack(limit=2)[(- 1)]
pa_call_line_synth = (this_line[0], (this_line[1] + LINE_DIFF))
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[(- 1)]
if (not (extract_tb(tb, limit=2)[(- 1)][:2] == pa_call_line_synth)):
raise
finally:
del tb
if (limit[0] <= maxargs):
limit[0] += 1
continue
raise
func_name = '<parse action>'
try:
func_name = getattr(func, '__name__', getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper |
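# Hedged illustration of the idea (not pyparsing itself): retry the call with more
# leading arguments stripped until the callee's arity fits, as wrapper() does above.
# The real wrapper also inspects the traceback so TypeErrors raised inside the
# callee are not silently retried.
def call_with_trimmed_arity(func, args):
    for skip in range(len(args) + 1):
        try:
            return func(*args[skip:])
        except TypeError:
            continue
    raise TypeError('no matching arity found')

assert call_with_trimmed_arity(lambda t: list(t), ('instring', 0, ['a', 'b'])) == ['a', 'b']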
class CliReporter(TextReporter):
def __init__(self, executes_verbose, ui):
super(CliReporter, self).__init__()
self._num_runs = None
self.ui = ui
self._runs_completed = 0
self._start_time = None
self._runs_remaining = 0
self._executes_verbose = executes_verbose
def run_failed(self, run_id, cmdline, return_code, cmd_output):
pass
def run_completed(self, run_id, statistics, cmdline):
self._runs_completed += 1
self._runs_remaining -= 1
def report_job_completed(self, run_ids):
self.ui.output(('\n\n' + format_pretty_table(self._generate_all_output(run_ids), ['Benchmark', 'Executor', 'Suite', 'Extra', 'Core', 'Size', 'Var', '#Samples', 'Mean (ms)'], vertical_bar=' ')))
def set_total_number_of_runs(self, num_runs):
self._num_runs = num_runs
self._runs_remaining = num_runs |
def mk_dotnet_wrappers(dotnet):
global Type2Str
dotnet.write('\n')
dotnet.write(' public static void Z3_set_error_handler(Z3_context a0, Z3_error_handler a1) {\n')
dotnet.write(' LIB.Z3_set_error_handler(a0, a1);\n')
dotnet.write(' Z3_error_code err = (Z3_error_code)LIB.Z3_get_error_code(a0);\n')
dotnet.write(' if (err != Z3_error_code.Z3_OK)\n')
dotnet.write(' throw new Z3Exception(Marshal.PtrToStringAnsi(LIB.Z3_get_error_msg(a0, (uint)err)));\n')
dotnet.write(' }\n\n')
for (name, result, params) in _dotnet_decls:
if (result == STRING):
dotnet.write((' public static string %s(' % name))
else:
dotnet.write((' public static %s %s(' % (type2dotnet(result), name)))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
dotnet.write(('%s a%d' % (param2dotnet(param), i)))
i = (i + 1)
dotnet.write(') {\n')
dotnet.write(' ')
if (result == STRING):
dotnet.write('IntPtr r = ')
elif (result != VOID):
dotnet.write(('%s r = ' % type2dotnet(result)))
dotnet.write(('LIB.%s(' % name))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
if (param_kind(param) == OUT):
if (param_type(param) == STRING):
dotnet.write('out ')
else:
dotnet.write('ref ')
elif (param_kind(param) == OUT_MANAGED_ARRAY):
dotnet.write('out ')
dotnet.write(('a%d' % i))
i = (i + 1)
dotnet.write(');\n')
if (name not in Unwrapped):
if (name in NULLWrapped):
dotnet.write(' if (r == IntPtr.Zero)\n')
dotnet.write(' throw new Z3Exception("Object allocation failed.");\n')
elif ((len(params) > 0) and (param_type(params[0]) == CONTEXT)):
dotnet.write(' Z3_error_code err = (Z3_error_code)LIB.Z3_get_error_code(a0);\n')
dotnet.write(' if (err != Z3_error_code.Z3_OK)\n')
dotnet.write(' throw new Z3Exception(Marshal.PtrToStringAnsi(LIB.Z3_get_error_msg(a0, (uint)err)));\n')
if (result == STRING):
dotnet.write(' return Marshal.PtrToStringAnsi(r);\n')
elif (result != VOID):
dotnet.write(' return r;\n')
dotnet.write(' }\n\n')
dotnet.write(' }\n\n')
dotnet.write('}\n\n') |
class Wrapper():
def get_args(parser):
pass
def get_net(args):
return Discriminator().to(args.device)
def get_optimizer(discriminator, args):
return None |
def masked_loss_mse(mask, reg_weight=0, norm_by_mask=True):
return masked_loss(mask, K.square, reg_weight=reg_weight, norm_by_mask=norm_by_mask) |
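# Hedged NumPy sketch of the behaviour (masked_loss and the Keras backend K come from
# the surrounding code base and are not redefined here): only masked positions
# contribute, optionally normalised by the number of masked elements.
import numpy as np

def masked_mse_reference(y_true, y_pred, mask, norm_by_mask=True):
    per_elem = mask * np.square(y_true - y_pred)
    denom = max(mask.sum(), 1e-8) if norm_by_mask else y_true.size
    return per_elem.sum() / denom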
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = TransfoXLTokenizer
test_rust_tokenizer = False
test_seq2seq = False
def setUp(self):
super().setUp()
vocab_tokens = ['<unk>', '[CLS]', '[SEP]', 'want', 'unwanted', 'wa', 'un', 'running', ',', 'low', 'l']
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
def get_tokenizer(self, **kwargs):
kwargs['lower_case'] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = '<unk> UNwanted , running'
output_text = '<unk> unwanted, running'
return (input_text, output_text)
def test_full_tokenizer(self):
tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
tokens = tokenizer.tokenize('<unk> UNwanted , running')
self.assertListEqual(tokens, ['<unk>', 'unwanted', ',', 'running'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
def test_full_tokenizer_lower(self):
tokenizer = TransfoXLTokenizer(lower_case=True)
self.assertListEqual(tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? '), ['hello', '!', 'how', 'are', 'you', '?'])
def test_full_tokenizer_no_lower(self):
tokenizer = TransfoXLTokenizer(lower_case=False)
self.assertListEqual(tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
def test_full_tokenizer_moses_numbers(self):
tokenizer = TransfoXLTokenizer(lower_case=False)
text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
tokens_out = ['Hello', '(', 'bracket', ')', 'and', 'side', '@-@', 'scrolled', '[', 'and', ']', 'Henry', "'s", '$', '5', '@,@', '000', 'with', '3', '@.@', '34', 'm', '.', 'What', "'s", 'up', '!', '?']
self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
def test_move_added_token(self):
tokenizer = self.get_tokenizer()
original_len = len(tokenizer)
tokenizer.add_tokens(['new1', 'new2'])
tokenizer.move_added_token('new1', 1)
self.assertEqual(len(tokenizer), (original_len + 2))
self.assertEqual(tokenizer.encode('new1'), [1])
self.assertEqual(tokenizer.decode([1]), 'new1') |
def D_adv_loss(pred, real=False, w=None):
w = match_size(w, pred)
if real:
return (w * F.relu((1 - pred))).mean()
else:
return (w * F.relu((1 + pred))).mean() |
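# Hedged usage sketch (assumes PyTorch; match_size above is only assumed to broadcast
# w to pred's shape): the standard hinge loss for a discriminator.
import torch
import torch.nn.functional as F

pred_real = torch.tensor([1.5, 0.2])
pred_fake = torch.tensor([-1.2, 0.4])
w = torch.ones(2)
d_loss = (w * F.relu(1 - pred_real)).mean() + (w * F.relu(1 + pred_fake)).mean()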
def prepare_encoder_decoder_model_kwargs(**kwargs):
kwargs_common = {argument: value for (argument, value) in kwargs.items() if ((not argument.startswith('encoder_')) and (not argument.startswith('decoder_')))}
if ('input_ids' in kwargs_common):
kwargs['encoder_input_ids'] = kwargs_common.pop('input_ids')
decoder_kwargs = kwargs_common.copy()
encoder_kwargs = kwargs_common.copy()
encoder_kwargs.update({argument[len('encoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('encoder_')})
decoder_kwargs.update({argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('decoder_')})
decoder_kwargs['encoder_attention_mask'] = encoder_kwargs.get('attention_mask', None)
return (encoder_kwargs, decoder_kwargs) |
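# Hedged usage sketch: shared kwargs are copied to both sides, prefixed kwargs are
# routed to their respective dicts, and 'input_ids' becomes the encoder input.
enc_kwargs, dec_kwargs = prepare_encoder_decoder_model_kwargs(
    input_ids=[[1, 2]], attention_mask=[[1, 1]], decoder_input_ids=[[3]])
# enc_kwargs -> {'attention_mask': [[1, 1]], 'input_ids': [[1, 2]]}
# dec_kwargs -> {'attention_mask': [[1, 1]], 'input_ids': [[3]],
#                'encoder_attention_mask': [[1, 1]]}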
def read_json(fname):
fname = Path(fname)
with fname.open('rt') as handle:
return json.load(handle, object_hook=OrderedDict) |
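# Hedged usage sketch: keys come back in file order because of the OrderedDict hook.
import json
import tempfile
from collections import OrderedDict

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fh:
    json.dump({'b': 1, 'a': 2}, fh)
data = read_json(fh.name)
assert isinstance(data, OrderedDict) and list(data) == ['b', 'a']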
def test_complicated():
offsets1 = ak.index.Index64(np.array([0, 3, 3, 5], dtype=np.int64))
content1 = ak.contents.ListOffsetArray(offsets1, ak.contents.NumpyArray(np.array(primes[:5], dtype=np.int64)))
offsets2 = ak.index.Index64(np.array([0, 3, 3, 5, 6, 8, 9], dtype=np.int64))
offsets3 = ak.index.Index64(np.array([0, 4, 4, 6], dtype=np.int64))
content2 = ak.contents.ListOffsetArray(offsets3, ak.contents.ListOffsetArray(offsets2, ak.contents.NumpyArray(np.array(primes[:9], dtype=np.int64))))
offsets4 = ak.index.Index64(np.array([0, 1, 1, 3], dtype=np.int64))
complicated = ak.contents.ListOffsetArray(offsets4, ak.contents.RecordArray([content1, content2], ['x', 'y']))
assert (to_list(complicated) == [[{'x': [2, 3, 5], 'y': [[2, 3, 5], [], [7, 11], [13]]}], [], [{'x': [], 'y': []}, {'x': [7, 11], 'y': [[17, 19], [23]]}]])
assert (to_list(complicated['x']) == [[[2, 3, 5]], [], [[], [7, 11]]])
assert (complicated.to_typetracer()['x'].form == complicated['x'].form)
assert (to_list(complicated['y']) == [[[[2, 3, 5], [], [7, 11], [13]]], [], [[], [[17, 19], [23]]]])
assert (complicated.to_typetracer()['y'].form == complicated['y'].form)
with pytest.raises(TypeError):
to_list(ak.prod(complicated, (- 1), highlevel=False))
with pytest.raises(TypeError):
assert (ak.prod(complicated.to_typetracer(), (- 1), highlevel=False).form == ak.prod(complicated, (- 1), highlevel=False).form)
assert (to_list(ak.prod(complicated['x'], (- 1), highlevel=False)) == [[30], [], [1, 77]])
assert (ak.prod(complicated.to_typetracer()['x'], (- 1), highlevel=False).form == ak.prod(complicated['x'], (- 1), highlevel=False).form)
assert (to_list(ak.prod(complicated['y'], (- 1), highlevel=False)) == [[[30, 1, 77, 13]], [], [[], [323, 23]]])
assert (ak.prod(complicated.to_typetracer()['y'], (- 1), highlevel=False).form == ak.prod(complicated['y'], (- 1), highlevel=False).form)
with pytest.raises(TypeError):
to_list(ak.prod(complicated, (- 2), highlevel=False))
with pytest.raises(TypeError):
assert (ak.prod(complicated.to_typetracer(), (- 2), highlevel=False).form == ak.prod(complicated, (- 2), highlevel=False).form)
assert (to_list(ak.prod(complicated['x'], (- 2), highlevel=False)) == [[2, 3, 5], [], [7, 11]])
assert (ak.prod(complicated.to_typetracer()['x'], (- 2), highlevel=False).form == ak.prod(complicated['x'], (- 2), highlevel=False).form)
assert (to_list(ak.prod(complicated['y'], (- 2), highlevel=False)) == [[[182, 33, 5]], [], [[], [391, 19]]])
assert (ak.prod(complicated.to_typetracer()['y'], (- 2), highlevel=False).form == ak.prod(complicated['y'], (- 2), highlevel=False).form)
assert (to_list(complicated[0]) == [{'x': [2, 3, 5], 'y': [[2, 3, 5], [], [7, 11], [13]]}])
assert (complicated.to_typetracer()[0].form == complicated[0].form)
with pytest.raises(TypeError):
to_list(ak.prod(complicated[0], (- 1), highlevel=False))
with pytest.raises(TypeError):
to_list(ak.prod(complicated.to_typetracer()[0], (- 1), highlevel=False)) |
def resnext101_32x8d(in_channels=3, pretrained=False, progress=True, **kwargs):
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet(in_channels, 'resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) |
@_dispatch
def ihfft(x, n=None, axis=(- 1), norm=None, overwrite_x=False, workers=None, *, plan=None):
return (Dispatchable(x, np.ndarray),) |
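# Hedged usage note: with SciPy's uarray-based dispatch, the public call goes through
# a multimethod like the one above and is executed by the active backend.
import numpy as np
import scipy.fft

spec = scipy.fft.ihfft(np.ones(8))
assert spec.shape == (5,)  # n // 2 + 1 one-sided bins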
def run_experiment_papi_ipc(input_config):
experiments = []
experiments.append(docker_experiment(instances=1, name='inscount_papi', experiment_type='papi', input_config=input_config, additional_cfg={'papi': {'events': ['PAPI_TOT_INS', 'PAPI_LST_INS', 'PAPI_BR_INS'], 'overflow_instruction_granularity': 1000000.0, 'overflow_buffer_size': 1000000.0}}))
experiments.append(docker_experiment(instances=1, name='sp_flops_papi', experiment_type='papi', input_config=input_config, additional_cfg={'papi': {'events': ['PAPI_TOT_INS', 'PAPI_SP_OPS', 'PAPI_VEC_SP'], 'overflow_instruction_granularity': 1000000.0, 'overflow_buffer_size': 1000000.0}}))
experiments.append(docker_experiment(instances=1, name='dp_flops_papi', experiment_type='papi', input_config=input_config, additional_cfg={'papi': {'events': ['PAPI_TOT_INS', 'PAPI_DP_OPS', 'PAPI_VEC_DP'], 'overflow_instruction_granularity': 1000000.0, 'overflow_buffer_size': 1000000.0}}))
experiments.append(docker_experiment(instances=1, name='cache_papi', experiment_type='papi', input_config=input_config, additional_cfg={'papi': {'events': ['PAPI_TOT_INS', 'PAPI_L1_DCM', 'PAPI_L1_ICM', 'PAPI_L2_TCM', 'PAPI_L3_TCM'], 'overflow_instruction_granularity': 1000000.0, 'overflow_buffer_size': 1000000.0}}))
experiments.append(docker_experiment(instances=1, name='cycles_papi', experiment_type='papi', input_config=input_config, additional_cfg={'papi': {'events': ['PAPI_TOT_CYC', 'PAPI_TOT_INS', 'PAPI_STL_ICY', 'PAPI_STL_CCY', 'PAPI_RES_STL'], 'overflow_instruction_granularity': 1000000.0, 'overflow_buffer_size': 1000000.0}}))
return experiments |
def register_Ns3VhtWifiMacHelper_methods(root_module, cls):
cls.add_constructor([param('ns3::VhtWifiMacHelper const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DataRateForMcs', 'ns3::StringValue', [param('int', 'mcs')], is_static=True)
cls.add_method('Default', 'ns3::VhtWifiMacHelper', [], is_static=True)
return |
class ValidatedDict(dict):
validate = dict([(key, validator) for (key, (default, validator)) in six.iteritems(default_goptions)])
def __setitem__(self, key, val):
try:
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError(('%s is not a valid option. See goptions.keys() for a list of valid options.' % (key,)))
def keys(self):
ks = sorted(dict.keys(self))
return ks
def values(self):
return [self[key] for key in self.keys()] |
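# Hedged sketch of the same pattern with a stand-in option table (default_goptions in
# the real module maps option name -> (default, validator) pairs):
demo_goptions = {'verbose': (False, bool), 'dpi': (100, int)}

class DemoValidatedDict(dict):
    validate = {k: validator for k, (_, validator) in demo_goptions.items()}
    def __setitem__(self, key, val):
        try:
            dict.__setitem__(self, key, self.validate[key](val))
        except KeyError:
            raise KeyError('%s is not a valid option.' % (key,))

opts = DemoValidatedDict()
opts['dpi'] = '150'  # coerced by the registered int validator
assert opts['dpi'] == 150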
def evaluate(dataset, predictions, output_folder, **kwargs):
args = dict(dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs)
if isinstance(dataset, datasets.KittiDataset):
return kitti_evaluation(**args)
else:
dataset_name = dataset.__class__.__name__
raise NotImplementedError('Unsupported dataset type {}.'.format(dataset_name)) |
def is_a_wikilink_or_keyword(item):
return 1 if (len(item) == 1) else 0 |
def register_Ns3CallbackImplBase_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> '])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> '])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType'])
return |
def test_normalize_action(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm):
assert (as_default.normalize_action(upper_100_denormalized_jp_action) == upper_100_normalized_action).all()
assert (as_jp_full.normalize_action(upper_100_denormalized_jp_action) == upper_100_normalized_action).all()
assert (as_jp_norm.normalize_action(upper_100_denormalized_jp_action) == upper_100_normalized_action).all()
assert (as_jt_full.normalize_action(upper_100_denormalized_jt_action) == upper_100_normalized_action).all()
assert (as_jt_norm.normalize_action(upper_100_denormalized_jt_action) == upper_100_normalized_action).all()
assert (as_jt_full.normalize_action(upper_100_denormalized_jp_action) != upper_100_normalized_action).all()
assert (as_jt_norm.normalize_action(upper_100_denormalized_jp_action) != upper_100_normalized_action).all()
assert (as_jp_full.normalize_action(upper_100_denormalized_jt_action) != upper_100_normalized_action).all()
assert (as_jp_norm.normalize_action(upper_100_denormalized_jt_action) != upper_100_normalized_action).all()
assert (as_default.normalize_action(lower_100_denormalized_jp_action) == lower_100_normalized_action).all()
assert (as_jp_full.normalize_action(lower_100_denormalized_jp_action) == lower_100_normalized_action).all()
assert (as_jp_norm.normalize_action(lower_100_denormalized_jp_action) == lower_100_normalized_action).all()
assert (as_jt_full.normalize_action(lower_100_denormalized_jt_action) == lower_100_normalized_action).all()
assert (as_jt_norm.normalize_action(lower_100_denormalized_jt_action) == lower_100_normalized_action).all()
assert (as_jt_full.normalize_action(lower_100_denormalized_jp_action) != lower_100_normalized_action).all()
assert (as_jt_norm.normalize_action(lower_100_denormalized_jp_action) != lower_100_normalized_action).all()
assert (as_jp_full.normalize_action(lower_100_denormalized_jt_action) != lower_100_normalized_action).all()
assert (as_jp_norm.normalize_action(lower_100_denormalized_jt_action) != lower_100_normalized_action).all()
assert (as_jp_norm.denormalize_action(as_jp_norm.normalize_action(upper_100_denormalized_jp_action)) == pytest.approx(upper_100_denormalized_jp_action))
assert (as_jt_norm.denormalize_action(as_jt_norm.normalize_action(upper_100_denormalized_jt_action)) == pytest.approx(upper_100_denormalized_jt_action))
assert (as_jp_norm.denormalize_action(as_jp_norm.normalize_action(lower_100_denormalized_jp_action)) == pytest.approx(lower_100_denormalized_jp_action))
assert (as_jt_norm.denormalize_action(as_jt_norm.normalize_action(lower_100_denormalized_jt_action)) == pytest.approx(lower_100_denormalized_jt_action)) |
def unpackage_configuration(conf):
confStr = conf.to_string()
fileName = conf.build_folder()
print('Unpackaging {}...'.format(confStr))
sourceDir = os.path.join(conf.target, fileName)
targetDir = os.path.join(PROJECT_CONFIG['build_dir'], fileName)
(folders, filesToCopy) = files_to_copy(conf, conf.target)
for folder in folders:
try:
os.makedirs(os.path.join(targetDir, folder))
except FileExistsError:
pass
filesCopied = 0
filesMissing = 0
for path in filesToCopy:
try:
shutil.copy(os.path.join(sourceDir, path), os.path.join(targetDir, path))
filesCopied += 1
except FileNotFoundError:
filesMissing += 1
if (filesCopied == 0):
raise FileNotFoundError('Files not found!')
if (filesMissing > 0):
print('WARNING: only {} / {} files copied ({} files missing).'.format(filesCopied, (filesCopied + filesMissing), filesMissing))
with open(os.path.join(targetDir, 'configure.sh'), 'r') as inFile:
confStr = inFile.read()
with open(os.path.join(targetDir, 'configure.sh'), 'w') as outFile:
fixed = re.sub(' -DCMAKE_C(XX)?_COMPILER=[^ ]*', '', confStr)
outFile.write(fixed)
run_build(conf, clean=False, hardware=False) |
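# Hedged sketch of the compiler-flag scrub applied to configure.sh above:
import re

line = 'cmake .. -DCMAKE_C_COMPILER=/usr/bin/clang -DCMAKE_CXX_COMPILER=/usr/bin/clang++ -DFOO=1'
print(re.sub(' -DCMAKE_C(XX)?_COMPILER=[^ ]*', '', line))
# cmake .. -DFOO=1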
class SRWLOptCryst(SRWLOpt):
def __init__(self, _d_sp, _psi0r, _psi0i, _psi_hr, _psi_hi, _psi_hbr, _psi_hbi, _tc, _ang_as, _nvx=0, _nvy=0, _nvz=(- 1), _tvx=1, _tvy=0, _uc=1):
self.dSp = _d_sp
self.psi0r = _psi0r
self.psi0i = _psi0i
self.psiHr = _psi_hr
self.psiHi = _psi_hi
self.psiHbr = _psi_hbr
self.psiHbi = _psi_hbi
self.tc = _tc
self.angAs = _ang_as
self.nvx = _nvx
self.nvy = _nvy
self.nvz = _nvz
self.tvx = _tvx
self.tvy = _tvy
self.aux_energy = None
self.aux_ang_dif_pl = None
self.uc = _uc
def set_orient(self, _nvx=0, _nvy=0, _nvz=(- 1), _tvx=1, _tvy=0):
self.nvx = _nvx
self.nvy = _nvy
self.nvz = _nvz
self.tvx = _tvx
self.tvy = _tvy
def find_orient(self, _en, _ang_dif_pl=0):
self.aux_energy = _en
self.aux_ang_dif_pl = _ang_dif_pl
eV2wA = 12398.4193009
wA = (eV2wA / _en)
kh = (1.0 / self.dSp)
hv = [0, (kh * cos(self.angAs)), ((- kh) * sin(self.angAs))]
tBr = asin(((wA * kh) / 2))
tKin = (tBr - self.angAs)
tKou = (tBr + self.angAs)
abs_c0 = sqrt(((self.psi0r * self.psi0r) + (self.psi0i * self.psi0i)))
dTref = (((0.5 * abs_c0) * (1 + (sin(tKou) / sin(tKin)))) / sin((2 * tBr)))
tIn = (tKin + dTref)
def prodV(_a, _b):
return [((_a[1] * _b[2]) - (_a[2] * _b[1])), ((_a[2] * _b[0]) - (_a[0] * _b[2])), ((_a[0] * _b[1]) - (_a[1] * _b[0]))]
def prodMV(_m, _v):
return [(((_m[0][0] * _v[0]) + (_m[0][1] * _v[1])) + (_m[0][2] * _v[2])), (((_m[1][0] * _v[0]) + (_m[1][1] * _v[1])) + (_m[1][2] * _v[2])), (((_m[2][0] * _v[0]) + (_m[2][1] * _v[1])) + (_m[2][2] * _v[2]))]
def normV(_a):
return sqrt(sum(((n ** 2) for n in _a)))
nv = [0, cos(tIn), (- sin(tIn))]
tv = [0, sin(tIn), cos(tIn)]
sv = prodV(nv, tv)
mc = [[sv[0], nv[0], tv[0]], [sv[1], nv[1], tv[1]], [sv[2], nv[2], tv[2]]]
z1c = [sv[2], sqrt(((1.0 - (sv[2] ** 2)) - ((tv[2] + (wA * hv[2])) ** 2))), (tv[2] + (wA * hv[2]))]
rz = prodMV(mc, z1c)
x1c = prodV(hv, z1c)
if (sum(((n ** 2) for n in x1c)) == 0):
x1c = prodV(nv, z1c)
if (sum(((n ** 2) for n in x1c)) == 0):
x1c = sv
x1c = [(n / normV(x1c)) for n in x1c]
rx = prodMV(mc, x1c)
ry = prodV(rz, rx)
tvNew = None
svNew = None
nvNew = None
ex = None
ey = None
ez = None
tolAng = 1e-06
if (abs(_ang_dif_pl) < tolAng):
tvNew = tv
svNew = sv
nvNew = nv
ex = rx
ey = ry
ez = rz
else:
cosA = cos(_ang_dif_pl)
sinA = sin(_ang_dif_pl)
mr = [[cosA, (- sinA), 0], [sinA, cosA, 0], [0, 0, 1]]
ez = prodMV(mr, rz)
ezIn = [0, 0, 1]
e1 = prodV(ez, ezIn)
abs_e1x = abs(e1[0])
abs_e1y = abs(e1[1])
if (abs_e1x >= abs_e1y):
if (e1[0] > 0):
ex = e1
else:
ex = [(- e1[0]), (- e1[1]), (- e1[2])]
ex = [(n / normV(ex)) for n in ex]
ey = prodV(ez, ex)
else:
if (e1[1] > 0):
ey = e1
else:
ey = [(- e1[0]), (- e1[1]), (- e1[2])]
ey = [(n / normV(ey)) for n in ey]
ex = prodV(ey, ez)
tvNew = prodMV(mr, tv)
svNew = prodMV(mr, sv)
nvNew = prodMV(mr, nv)
if (self.uc == 2):
ex = [1, 0, 0]
ey = [0, 1, 0]
ez = [0, 0, 1]
return [[tvNew, svNew, nvNew], [ex, ey, ez]] |
class CrossValidatedTask(BaseTask):
def __init__(self, wrapped_task: BaseTask, num_folds: int=4, seed: int=None):
self.wrapped_task: BaseTask = wrapped_task
self.num_folds = num_folds
self.folds = None
self._spec = wrapped_task.spec()
self.set_fold(0)
self.seed = seed
def set_fold(self, fold: int):
self.fold = fold
self._spec.output_dir = f'{self._spec.task_dir}-fold{self.fold}'
def _read_folds(self, data_path: str):
if (self.seed is not None):
random.seed(self.seed)
data: List[DataExample] = []
for record in self.wrapped_task.read(data_path, 'train'):
data.append(record)
random.shuffle(data)
folds = [[] for _ in range(self.num_folds)]
for (idx, record) in enumerate(data):
fold_idx = (idx % self.num_folds)
folds[fold_idx].append(record)
return folds
def read(self, data_path: str, split: str) -> Iterable[DataExample]:
if (self.folds is None):
self.folds = self._read_folds(data_path)
if (split == 'dev'):
return self.wrapped_task.read(data_path, split)
elif (split == 'train'):
folds = [self.folds[idx] for idx in range(self.num_folds) if (idx != self.fold)]
return chain(*folds)
elif (split == 'test'):
return [rec for rec in self.folds[self.fold]]
def cv_folds(wrapped_task: BaseTask, num_folds: int=4, seed: int=None) -> Iterable[BaseTask]:
task = CrossValidatedTask(wrapped_task, num_folds, seed)
for fold in range(num_folds):
task.set_fold(fold)
(yield task) |
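# Hedged usage sketch: each yielded task is the same object re-pointed at the next
# fold, so consume it (e.g. train and evaluate) before the generator advances.
# train_model below is a hypothetical entry point, not part of the source.
# for fold_task in cv_folds(my_task, num_folds=4, seed=42):
#     train_model(fold_task)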
def __getattr__(name):
return _sub_module_deprecation(sub_package='io', module='mmio', private_modules=['_mmio'], all=__all__, attribute=name) |
def test():
empty1 = ak.highlevel.Array(ak.contents.EmptyArray(), check_valid=True)
empty2 = ak.highlevel.Array(ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 0, 0, 0], dtype=np.int64)), ak.contents.EmptyArray()), check_valid=False)
array = ak.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]], check_valid=True)
assert (ak.operations.to_numpy(empty1).dtype.type is np.float64)
assert (to_list(array[empty1]) == [])
assert (to_list(array[(empty1,)]) == [])
assert (to_list(array[empty2]) == [[], [], []])
assert (to_list(array[(empty2,)]) == [[], [], []]) |
@pytest.mark.parametrize('action_size', [4])
def test_identity_transformer_action_sampler(action_size: int) -> None:
action_sampler = IdentityTransformerActionSampler()
x = np.random.random(action_size)
action = action_sampler(x)
assert np.all((action == x)) |
def exportable_test_case_with_unexpected_exception(function_mock):
test_case = dtc.DefaultTestCase(ModuleTestCluster(0))
float_stmt = FloatPrimitiveStatement(test_case, 42.23)
function_stmt = FunctionStatement(test_case, function_mock, {'z': float_stmt.ret_val})
function_stmt.add_assertion(ass.ExceptionAssertion('builtins', 'ValueError'))
test_case.add_statement(float_stmt)
test_case.add_statement(function_stmt)
return tcc.TestCaseChromosome(test_case) |
class SpectralOpFuzzer(benchmark.Fuzzer):
def __init__(self, *, seed: int, dtype=torch.float64, cuda: bool=False, probability_regular: float=1.0):
super().__init__(parameters=[FuzzedParameter('ndim', distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), [FuzzedParameter(name=f'k_any_{i}', minval=MIN_DIM_SIZE, maxval=MAX_DIM_SIZE, distribution='loguniform') for i in range(3)], [FuzzedParameter(name=f'k_regular_{i}', distribution={size: (1.0 / len(REGULAR_SIZES)) for size in REGULAR_SIZES}) for i in range(3)], [FuzzedParameter(name=f'k{i}', distribution={ParameterAlias(f'k_regular_{i}'): probability_regular, ParameterAlias(f'k_any_{i}'): (1 - probability_regular)}, strict=True) for i in range(3)], [FuzzedParameter(name=f'step_{i}', distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}) for i in range(3)]], tensors=[FuzzedTensor(name='x', size=('k0', 'k1', 'k2'), steps=('step_0', 'step_1', 'step_2'), probability_contiguous=0.75, min_elements=(4 * 1024), max_elements=(32 * (1024 ** 2)), max_allocation_bytes=(2 * (1024 ** 3)), dim_parameter='ndim', dtype=dtype, cuda=cuda)], seed=seed) |
def set_defaults(dict_, defaults):
for (key, val) in six.iteritems(defaults):
dict_.setdefault(key, val) |
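# Hedged usage sketch: existing keys win; only missing keys are filled in.
cfg = {'lr': 0.1}
set_defaults(cfg, {'lr': 0.01, 'momentum': 0.9})
assert cfg == {'lr': 0.1, 'momentum': 0.9}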
def get_word2vec(args, word_counter):
glove_path = os.path.join(args.glove_dir, 'glove.{}.{}d.txt'.format(args.glove_corpus, args.glove_vec_size))
sizes = {'6B': int(400000.0), '42B': int(1900000.0), '840B': int(2200000.0), '2B': int(1200000.0)}
total = sizes[args.glove_corpus]
word2vec_dict = {}
with open(glove_path, 'r') as fh:
for line in tqdm(fh, total=total):
array = line.lstrip().rstrip().split(' ')
word = array[0]
vector = list(map(float, array[1:]))
if (word in word_counter):
word2vec_dict[word] = vector
elif (word.capitalize() in word_counter):
word2vec_dict[word.capitalize()] = vector
elif (word.lower() in word_counter):
word2vec_dict[word.lower()] = vector
elif (word.upper() in word_counter):
word2vec_dict[word.upper()] = vector
print('{}/{} of word vocab have corresponding vectors in {}'.format(len(word2vec_dict), len(word_counter), glove_path))
return word2vec_dict |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
if (data_args.server_ip and data_args.server_port):
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(data_args.server_ip, data_args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
logger.info(f'Training/evaluation parameters {training_args}')
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif (last_checkpoint is not None):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if training_args.do_train:
if (model_args.train_language is None):
train_dataset = load_dataset('xnli', model_args.language, split='train', cache_dir=model_args.cache_dir)
else:
train_dataset = load_dataset('xnli', model_args.train_language, split='train', cache_dir=model_args.cache_dir)
label_list = train_dataset.features['label'].names
if training_args.do_eval:
eval_dataset = load_dataset('xnli', model_args.language, split='validation', cache_dir=model_args.cache_dir)
label_list = eval_dataset.features['label'].names
if training_args.do_predict:
predict_dataset = load_dataset('xnli', model_args.language, split='test', cache_dir=model_args.cache_dir)
label_list = predict_dataset.features['label'].names
num_labels = len(label_list)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task='xnli', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if data_args.pad_to_max_length:
padding = 'max_length'
else:
padding = False
def preprocess_function(examples):
return tokenizer(examples['premise'], examples['hypothesis'], padding=padding, max_length=data_args.max_seq_length, truncation=True)
if training_args.do_train:
if (data_args.max_train_samples is not None):
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc='train dataset map pre-processing'):
train_dataset = train_dataset.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
if training_args.do_eval:
if (data_args.max_eval_samples is not None):
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc='validation dataset map pre-processing'):
eval_dataset = eval_dataset.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
if training_args.do_predict:
if (data_args.max_predict_samples is not None):
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc='prediction dataset map pre-processing'):
predict_dataset = predict_dataset.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
metric = load_metric('xnli')
def compute_metrics(p: EvalPrediction):
preds = (p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions)
preds = np.argmax(preds, axis=1)
return metric.compute(predictions=preds, references=p.label_ids)
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)
if training_args.do_train:
checkpoint = None
if (training_args.resume_from_checkpoint is not None):
checkpoint = training_args.resume_from_checkpoint
elif (last_checkpoint is not None):
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.save_model()
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
if training_args.do_eval:
logger.info('*** Evaluate ***')
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics('eval', metrics)
trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***')
(predictions, labels, metrics) = trainer.predict(predict_dataset, metric_key_prefix='predict')
max_predict_samples = (data_args.max_predict_samples if (data_args.max_predict_samples is not None) else len(predict_dataset))
metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics('predict', metrics)
trainer.save_metrics('predict', metrics)
predictions = np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, 'predictions.txt')
if trainer.is_world_process_zero():
with open(output_predict_file, 'w') as writer:
writer.write('index\tprediction\n')
for (index, item) in enumerate(predictions):
item = label_list[item]
writer.write(f'{index}\t{item}\n') |
class multiplanetPoincareSystem(rebound.Simulation):
def add(self, *args, **kwargs):
super(multiplanetPoincareSystem, self).add(*args, **kwargs)
self.sim_to_myvars()
def sim_to_myvars(self):
ps = self.particles
Nps = len(ps)
Mjac = np.zeros(Nps)
mujac = np.zeros(Nps)
Mstar = ps[0].m
Mint = Mstar
for p in ps[1:]:
mi = p.m
p.mjac = ((mi * Mint) / (mi + Mint))
p.Mjac = (Mstar * (mi / p.mjac))
p.mujac = (((self.G ** 2) * (p.Mjac ** 2)) * (p.mjac ** 3))
Mint += mi
p.Lambda = (p.mjac * np.sqrt(((self.G * p.Mjac) * p.a)))
p.Gamma = (p.Lambda * (1.0 - np.sqrt((1.0 - (p.e ** 2)))))
p.lam = p.l
p.gamma = (- p.pomega)
def add_resonance(planet1, planet2, j, k, l):
pass
def add_all_resonances(planet1, planet2, j, k):
pass
def integrate(self, time):
Nvariables = do_integration()
self.assign_variables(Nvariables) |
class ReactAgent(BaseAgent):
def __init__(self, llm, context_len=2000):
super().__init__(llm, context_len)
self.type = 'React_Webrun_Agent'
self.name = f'{self.type}_{self.life_label}'
def prompt_layer(self):
one_shot = pre_prompt.oneshot
prompt = f'''{one_shot}{self.observations[self.cur_session][0]}
Action:'''
if (len(self.actions[self.cur_session]) > 1):
initial_prompt = f'''{one_shot}{self.observations[self.cur_session][0]}
'''
history = self.get_history()
remain_context_space = ((self.context_len - len(token_enc.encode(initial_prompt))) * 3)
history = history[(- int(remain_context_space)):]
prompt = f'''{initial_prompt}{history}
Action:'''
return prompt
def forward(self, observation, available_actions=None):
self.observations[self.cur_session].append(observation)
prompt = self.prompt_layer()
action = self.llm_layer(prompt)
self.actions[self.cur_session].append(action)
return action |
class Discriminator2D(nn.Module):
def __init__(self, opt=None):
super(Discriminator2D, self).__init__()
self.main = nn.Sequential(nn.Conv3d(6, 64, kernel_size=(1, 4, 4), stride=(1, 2, 2), padding=(0, 2, 2)), nn.LeakyReLU(0.2, inplace=True), nn.Conv3d(64, 128, kernel_size=(1, 4, 4), stride=(1, 2, 2), padding=(0, 2, 2)), nn.BatchNorm3d(128, affine=True), nn.LeakyReLU(0.2, inplace=True), nn.Conv3d(128, 1, kernel_size=(1, 4, 4), stride=(1, 1, 1), padding=(0, 2, 2)), nn.Sigmoid())
def forward(self, x):
return self.main(x) |
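# Hedged shape check (assumes PyTorch): a 6-channel clip of shape (N, 6, T, H, W) is
# mapped to a per-patch probability map; the time axis is untouched because every
# kernel and stride is 1 along it.
import torch

d = Discriminator2D()
print(d(torch.randn(2, 6, 4, 64, 64)).shape)  # e.g. torch.Size([2, 1, 4, 18, 18])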
def create_pipeline_configuration(DEBUG=False, batch_size=32):
config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (T5LayerNorm, StatelessEmbedding, Embedding, Dropout, Linear), 'model_inputs': {'attention_mask': {'shape': torch.Size([32, 64]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 8]}, 'decoder_attention_mask': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'decoder_input_ids': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([32, 64]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([32, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([32, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___333': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___335': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___333': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___335': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 
'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': 
[6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_8': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'attention_mask': {'shape': torch.Size([32, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_8': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]': {'shape': torch.Size([32, 64, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: {'stage_cls': Partition9, 'inputs': 
{'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': torch.Size([32, 4, 1024]), 
'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': 
{'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': {'labels': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 
'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]': {'shape': torch.Size([32, 64, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}}, 'outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}}
batch_dim = config['batch_dim']
for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
for s in config['stages'].values():
for d in chain(s['inputs'].values(), s['outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
return config |
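# A minimal sketch (not from the original source) of the batch-dimension rewrite
# performed by the loops above: only the entry at batch_dim is replaced by the
# requested batch_size, while all other dimensions of the traced shape are kept.
import torch
shape = torch.Size([32, 64, 1024])
batch_dim, batch_size = 0, 8
new_shape = torch.Size(shape[:batch_dim] + (batch_size,) + shape[(batch_dim + 1):])
assert new_shape == torch.Size([8, 64, 1024]) |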
def init_wandb(directory, config):
if (('NO_WANDB' in os.environ) and (os.environ['NO_WANDB'] == 'true')):
log.info('== Working without wandb')
return None
directory_contents = directory.split('/')
run_name = directory_contents[(- 1)]
date = directory_contents[(- 2)]
strat_name = directory_contents[(- 3)]
model_name = directory_contents[(- 4)]
task = directory_contents[(- 5)]
group_name = f'{task}|{model_name}|{strat_name}|{date}'
run_name = f'{run_name}'
return wandb.init(group=group_name, name=run_name, config=config, job_type='train', force=True, tags=[strat_name, model_name, task]) |
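# Hedged illustration (with an invented directory path) of how init_wandb above maps
# the trailing path components onto the wandb group and run names.
directory = 'results/cola/t5-base/pipeline/2023-01-01/seed_0'
parts = directory.split('/')
group_name = f'{parts[-5]}|{parts[-4]}|{parts[-3]}|{parts[-2]}'
assert group_name == 'cola|t5-base|pipeline|2023-01-01'
assert parts[-1] == 'seed_0' |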
class GRUFused(Function):
@staticmethod
def forward(ctx, input_gate, hidden_gate, hx, ibias=None, hbias=None):
ctx.backend = type2backend[input_gate.type()]
hy = input_gate.new()
workspace = input_gate.new((hx.numel() * 5))
ctx.has_bias = False
if (ibias is not None):
ctx.has_bias = True
if (ibias.dim() == 1):
ibias = ibias.unsqueeze(0)
if (hbias.dim() == 1):
hbias = hbias.unsqueeze(0)
ctx.backend.GRUFused_updateOutput(ctx.backend.library_state, input_gate, hidden_gate, ibias, hbias, hx, hy, workspace)
ctx.workspace = workspace
ctx.igate_size = input_gate.size()
ctx.hgate_size = hidden_gate.size()
return hy
@staticmethod
@once_differentiable
def backward(ctx, gradOutput):
ctx.backend = type2backend[gradOutput.type()]
gradInputHx = gradOutput.new()
gradInInput = gradOutput.new(*ctx.igate_size)
gradInHidden = gradOutput.new(*ctx.hgate_size)
ctx.backend.GRUFused_updateGradInput(ctx.backend.library_state, gradInInput, gradInHidden, gradOutput, gradInputHx, ctx.workspace)
gb1 = gb2 = None
if ctx.has_bias:
gb1 = gradInInput.sum(0, keepdim=False)
gb2 = gradInHidden.sum(0, keepdim=False)
return (gradInInput, gradInHidden, gradInputHx, gb1, gb2) |
class Exemplar1K(Dataset):
def __init__(self, data_root, classes, num_samples, transform, train=True):
self.transform = transform
self.sample_filepaths = []
self.train = train
self.train_sample_cls = []
self.test_sample_cls = []
self.train_data = []
self.test_data = []
f = open('./data/class_folder_list.txt', 'r')
lines = f.readlines()
f.close()
dir_list = []
for x in lines:
dir_list.append(x.split(' ')[0])
np.random.seed(1993)
cls_list = [i for i in range(1000)]
np.random.shuffle(cls_list)
dir_list = [dir_list[i] for i in cls_list]
for (cls_idx, cls) in enumerate(dir_list):
cls_folder = os.path.join(data_root, cls)
if (cls_idx in classes):
sample_idx = 0
for sample in os.listdir(cls_folder):
sample_filepath = os.path.join(cls_folder, sample)
self.sample_filepaths.append(sample_filepath)
if train:
self.train_sample_cls.append(cls_idx)
else:
self.test_sample_cls.append(cls_idx)
def __len__(self):
if self.train:
return len(self.train_sample_cls)
else:
return len(self.test_sample_cls)
def __getitem__(self, idx):
if self.train:
img = cv2.imread(self.sample_filepaths[idx])
img = self.transform(img)
label = self.train_sample_cls[idx]
return (img, img, label)
else:
img = cv2.imread(self.sample_filepaths[idx])
img = self.transform(img)
label = self.test_sample_cls[idx]
return (img, img, label)
def get_image_class(self, label):
list_label = []
list_label = [np.array(cv2.imread(self.sample_filepaths[idx])) for (idx, k) in enumerate(self.train_sample_cls) if (k == label)]
return np.array(list_label)
def append(self, images, labels):
self.train_data = np.concatenate((self.train_data, images), axis=0)
self.train_sample_cls = (self.train_sample_cls + labels) |
def test_dde_simple():
def dde_tester(a: dace.float64[20], b: dace.float64[20]):
c = (a + b)
b[:] = a
sdfg = dde_tester.to_sdfg()
Pipeline([DeadDataflowElimination()]).apply_pass(sdfg, {})
sdfg.simplify()
assert (sdfg.number_of_nodes() == 1)
assert all(((n.data != 'c') for n in sdfg.node(0).data_nodes())) |
def test_pdf_set_poi(backend):
model = pyhf.simplemodels.uncorrelated_background([5.0], [10.0], [2.5])
assert (model.config.poi_index == 0)
assert (model.config.poi_name == 'mu')
model.config.set_poi('uncorr_bkguncrt')
assert (model.config.poi_index == 1)
assert (model.config.poi_name == 'uncorr_bkguncrt')
model.config.set_poi(None)
assert (model.config.poi_index is None)
assert (model.config.poi_name is None) |
class CompoundTransformerLayer(TransformerLayer):
def __init__(self, units: int, transformer_list: List[TransformerLayer]):
self.transformer_list = transformer_list
super(CompoundTransformerLayer, self).__init__(units=units)
def transform(self, inputs: tf.Tensor) -> tf.Tensor:
outputs = inputs
for transformer in self.transformer_list:
outputs = transformer.transform(outputs)
return outputs
def inverse_transform(self, outputs: tf.Tensor) -> tf.Tensor:
inputs = outputs
for transformer in self.transformer_list[::(- 1)]:
inputs = transformer.inverse_transform(inputs)
return inputs |
def steenrod_basis_error_check(dim, p, **kwds):
from sage.misc.verbose import verbose
generic = kwds.get('generic', (p != 2))
if (not generic):
bases = ('adem', 'woody', 'woodz', 'wall', 'arnona', 'arnonc', 'pst_rlex', 'pst_llex', 'pst_deg', 'pst_revz', 'comm_rlex', 'comm_llex', 'comm_deg', 'comm_revz')
else:
bases = ('adem', 'pst_rlex', 'pst_llex', 'pst_deg', 'pst_revz', 'comm_rlex', 'comm_llex', 'comm_deg', 'comm_revz')
for i in range(dim):
if ((i % 5) == 0):
verbose(('up to dimension %s' % i))
milnor_dim = len(steenrod_algebra_basis.f(i, 'milnor', p=p, generic=generic))
for B in bases:
if (milnor_dim != len(steenrod_algebra_basis.f(i, B, p, generic=generic))):
print('problem with milnor/{} in dimension {}'.format(B, i))
mat = convert_to_milnor_matrix.f(i, B, p, generic=generic)
if ((mat.nrows() != 0) and (not mat.is_invertible())):
print(('%s invertibility problem in dim %s at p=%s' % (B, i, p)))
verbose('done checking, no profiles')
bases = ('pst_rlex', 'pst_llex', 'pst_deg', 'pst_revz')
if (not generic):
profiles = [(4, 3, 2, 1), (2, 2, 3, 1, 1), (0, 0, 0, 2)]
else:
profiles = [((3, 2, 1), ()), ((), (2, 1, 2)), ((3, 2, 1), (2, 2, 2, 2))]
for i in range(dim):
if ((i % 5) == 0):
verbose(('up to dimension %s' % i))
for pro in profiles:
milnor_dim = len(steenrod_algebra_basis.f(i, 'milnor', p=p, profile=pro, generic=generic))
for B in bases:
if (milnor_dim != len(steenrod_algebra_basis.f(i, B, p, profile=pro, generic=generic))):
print(('problem with milnor/%s in dimension %s with profile %s' % (B, i, pro)))
verbose('done checking with profiles') |
def _imresize_before(img, size, channel_first, interpolate, interpolations_map):
if (not isinstance(img, np.ndarray)):
raise ValueError('the input img for imresize must be numpy.ndarray.')
if (not isinstance(size, (list, tuple))):
raise ValueError('size must be list or tuple')
if (len(img.shape) not in [2, 3]):
raise ValueError('Invalid dimension size of input image. (dims: {})'.format(len(img.shape)))
if (interpolate not in interpolations_map):
raise ValueError('unknown interpolation type. In this backend, you can use only one of [{}]'.format(', '.join(interpolations_map)))
if (img.dtype not in [np.uint8, np.uint16]):
img = np.asarray(img, np.float32)
if (channel_first and (len(img.shape) == 3)):
img = img.transpose((1, 2, 0))
if ((len(img.shape) == 3) and (np.prod(img.shape[:(- 1)]) == 1)):
cur_dtype = img.dtype
img = (img * np.ones((2, 2, 1))).astype(cur_dtype)
return img |
def test_conformer():
import resource
import sys
try:
resource.setrlimit(resource.RLIMIT_STACK, ((2 ** 29), (- 1)))
except Exception as exc:
print(f'resource.setrlimit {type(exc).__name__}: {exc}')
sys.setrecursionlimit((10 ** 6))
time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
in_dim = Dim(7, name='in')
extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
from returnn.frontend.encoder.conformer import ConformerEncoder, ConformerConvSubsample
def _forward_step(*, model: ConformerEncoder, extern_data: TensorDict):
(out, out_spatial_dim) = model(extern_data['data'], in_spatial_dim=time_dim)
out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, model.out_dim))
run_model(extern_data, (lambda *, epoch, step: ConformerEncoder(in_dim, Dim(14, name='out'), ff_dim=Dim(17, name='ff'), input_layer=ConformerConvSubsample(in_dim, out_dims=[Dim(32, name='conv1'), Dim(64, name='conv2')], filter_sizes=[(3, 3), (3, 3)], pool_sizes=[(2, 1), (2, 1)]), num_heads=2, num_layers=2)), _forward_step) |
def focal_loss_with_logits(output: torch.Tensor, target: torch.Tensor, gamma: float=2.0, alpha: Optional[float]=0.25, reduction: str='mean', normalized: bool=False, reduced_threshold: Optional[float]=None, eps: float=1e-06, ignore_index=None) -> torch.Tensor:
target = target.type_as(output)
p = torch.sigmoid(output)
ce_loss = F.binary_cross_entropy_with_logits(output, target, reduction='none')
pt = ((p * target) + ((1 - p) * (1 - target)))
if (reduced_threshold is None):
focal_term = (1.0 - pt).pow(gamma)
else:
focal_term = ((1.0 - pt) / reduced_threshold).pow(gamma)
focal_term = torch.masked_fill(focal_term, (pt < reduced_threshold), 1)
loss = (focal_term * ce_loss)
if (alpha is not None):
loss *= ((alpha * target) + ((1 - alpha) * (1 - target)))
if (ignore_index is not None):
ignore_mask = target.eq(ignore_index)
loss = torch.masked_fill(loss, ignore_mask, 0)
if normalized:
focal_term = torch.masked_fill(focal_term, ignore_mask, 0)
if normalized:
norm_factor = focal_term.sum(dtype=torch.float32).clamp_min(eps)
loss /= norm_factor
if (reduction == 'mean'):
loss = loss.mean()
if (reduction == 'sum'):
loss = loss.sum(dtype=torch.float32)
if (reduction == 'batchwise_mean'):
loss = loss.sum(dim=0, dtype=torch.float32)
return loss |
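# Minimal usage sketch for focal_loss_with_logits above; it assumes the function and
# its imports (torch, torch.nn.functional as F, Optional) are available in scope.
import torch
logits = torch.randn(4, 1, 8, 8)
targets = torch.randint(0, 2, (4, 1, 8, 8)).float()
loss = focal_loss_with_logits(logits, targets, gamma=2.0, alpha=0.25, reduction='mean')
assert loss.ndim == 0 |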
def euclidean_distance_standardized(v1, v2):
v1_v2 = np.vstack([v1, v2])
sk_v1_v2 = np.var(v1_v2, axis=0, ddof=1)
return np.sqrt((((v1 - v2) ** 2) / (sk_v1_v2 + (zero_bit * np.ones_like(sk_v1_v2)))).sum()) |
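# Usage sketch for euclidean_distance_standardized above. zero_bit is a module-level
# constant in the original code; a tiny value is assumed here only to keep the
# per-feature variance denominator non-zero.
import numpy as np
zero_bit = 1e-12
v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([2.0, 4.0, 6.0])
# Each squared difference divided by the per-feature sample variance equals 2,
# so the distance is sqrt(6) ~= 2.449.
print(euclidean_distance_standardized(v1, v2)) |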
class DataParallelModel(DataParallel):
def forward(self, inputs, **kwargs):
kwargs = scatter(kwargs, self.device_ids[:len(inputs)], self.dim)
if (len(self.device_ids) == 1):
return (self.module(*inputs[0], **kwargs[0]),)
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return outputs
def replicate(self, module, device_ids):
modules = super(DataParallelModel, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules |
@runtime_checkable
class AuthProvider(Generic[Auth], Protocol):
def get(self, case: Case, context: AuthContext) -> (Auth | None):
...
def set(self, case: Case, data: Auth, context: AuthContext) -> None:
... |
def eval(args):
bench = benchmark_set.BenchmarkSet(args.benchmark)
bench.set_instance(args.instance)
if (args.kwargs is None):
args.kwargs = sample_random(bench)
ys = bench.objective_function(args.kwargs)
return ys |
class YT8MDialDataset(BaseDataset):
def __init__(self, **kwargs):
super().__init__(kwargs['vis_processor'], kwargs['text_processor'], kwargs['vis_root'], kwargs['ann_paths'])
self.modalities = kwargs['modalities']
for modality in self.modalities:
if ('image' in modality):
setattr(self, f'existing_{modality}_annotation', getattr(self, f'get_existing_{modality}_annotations')())
continue
setattr(self, f'{modality}_root', kwargs[f'{modality}_root'])
setattr(self, f'{modality}_processor', kwargs[f'{modality}_processor'])
setattr(self, f'existing_{modality}_annotation', getattr(self, f'get_existing_{modality}_annotations')())
self.sample_ids = set.intersection(*[set(getattr(self, f'existing_{modality}_annotation')) for modality in self.modalities])
self.annotation = [ann for ann in self.annotation if (ann['youtube_id'] in self.sample_ids)]
def get_existing_audio_annotations(self):
return [f.split('_')[0] for f in os.listdir(self.audio_root)]
def get_existing_video_annotations(self):
return [f.split('_')[0] for f in os.listdir(self.video_root)]
def get_audio_path(self, ann):
return os.path.join(self.audio_root, f"{ann['youtube_id']}_{ann['start_sec']}_{ann['end_sec']}.flac")
def get_video_path(self, ann):
return os.path.join(self.video_root, f"{ann['youtube_id']}_{ann['start_sec']}_{ann['end_sec']}.mp4")
def __getitem__(self, index):
ann = copy.deepcopy(self.annotation[index])
for modality in self.modalities:
ann[f'{modality}_path'] = getattr(self, f'get_{modality}_path')(ann)
if (type(ann[f'{modality}_path']) == list):
ann[f'{modality}_path'] = random.choice(ann[f'{modality}_path'])
if ('video' in modality):
try:
ann['video'] = getattr(self, f'video_processor')(ann[f'video_path'], start_sec=ann['start_sec'], end_sec=ann['end_sec']).to(torch.float32)
except:
return None
elif ('image' in modality):
ann['image'] = self.vis_processor(Image.open(ann[f'images_path']))
else:
ann[modality] = getattr(self, f'{modality}_processor')(ann[f'{modality}_path']).to(torch.float32)
ann['sample_id'] = ann['youtube_id']
ann['text_output'] = self.text_processor(ann['response'])
ann['text_input'] = self.text_processor(ann['context'])
ann['question_id'] = index
ann['captions'] = ann['response']
return ann |
class NpWrapper(gym.ObservationWrapper):
def observation(self, observation):
obs = np.array(observation).astype('int')
return obs |
def generate_tgen_config(args, tgen_clients, exit_peers, hs_peers):
abs_conf_path = '{}/{}'.format(args.prefix, CONFIG_DIRNAME)
if (not os.path.exists(abs_conf_path)):
os.makedirs(abs_conf_path)
hosts_prefix = '{}/{}/{}'.format(args.prefix, SHADOW_TEMPLATE_PATH, SHADOW_HOSTS_PATH)
if (not os.path.exists(hosts_prefix)):
os.makedirs(hosts_prefix)
__generate_tgenrc_server(abs_conf_path)
__generate_tgenrc_perfclient(exit_peers, os.path.join(abs_conf_path, TGENRC_PERFCLIENT_EXIT_FILENAME))
__generate_tgenrc_perfclient(hs_peers, os.path.join(abs_conf_path, TGENRC_PERFCLIENT_HS_FILENAME))
__generate_tgenrc_markovclients(abs_conf_path, hosts_prefix, tgen_clients)
__generate_tgen_traffic_models(args, abs_conf_path) |
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
_model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING |
_context(matplotlib_settings)
def plot_potential_to_axes(axes: Axes, x_vals: ndarray, potential_vals: Union[(ndarray, List[float])], offset_list: Union[(ndarray, List[float])], **kwargs) -> None:
y_min = np.min(potential_vals)
y_max = np.max(offset_list)
y_range = (y_max - y_min)
y_max += (0.3 * y_range)
y_min = (np.min(potential_vals) - (0.1 * y_range))
axes.set_ylim([y_min, y_max])
axes.plot(x_vals, potential_vals, color='gray', **_extract_kwargs_options(kwargs, 'plot')) |
def process_cache(cached_lines):
tokens = []
ner_tags = []
for line in cached_lines:
array = line.split('\t')
if (len(array) < MIN_NUM_FIELD):
array = line.split()
assert ((len(array) >= MIN_NUM_FIELD) and (len(array) <= MAX_NUM_FIELD)), 'Got unexpected line length: {}'.format(array)
tokens.append(array[0])
ner_tags.append(array[(- 1)])
return (tokens, ner_tags) |
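# Tiny usage sketch for process_cache above. MIN_NUM_FIELD and MAX_NUM_FIELD are
# module-level bounds in the original code; 2 and 4 are assumed here.
MIN_NUM_FIELD, MAX_NUM_FIELD = 2, 4
tokens, ner_tags = process_cache(['EU\tB-ORG', 'rejects\tO', 'German\tB-MISC'])
assert tokens == ['EU', 'rejects', 'German']
assert ner_tags == ['B-ORG', 'O', 'B-MISC'] |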
class LatticePolygon_PPL_class(LatticePolytope_PPL_class):
@cached_method
def ordered_vertices(self):
neighbors = dict()
if (self.affine_dimension() < 2):
return self.vertices()
for c in self.minimized_constraints():
(v1, v2) = self.vertices_saturating(c)
neighbors[v1] = ([v2] + neighbors.get(v1, []))
neighbors[v2] = ([v1] + neighbors.get(v2, []))
v_prev = self.vertices()[0]
v_curr = neighbors[v_prev][0]
result = [v_prev, v_curr]
while (len(result) < self.n_vertices()):
(v1, v2) = neighbors[v_curr]
if (v1 == v_prev):
v_next = v2
else:
v_next = v1
result.append(v_next)
v_prev = v_curr
v_curr = v_next
return tuple(result)
def _find_isomorphism_degenerate(self, polytope):
from sage.geometry.polyhedron.lattice_euclidean_group_element import LatticePolytopesNotIsomorphicError
polytope_vertices = polytope.vertices()
self_vertices = self.ordered_vertices()
if (self.n_vertices() == 0):
A = zero_matrix(ZZ, polytope.space_dimension(), self.space_dimension())
b = zero_vector(ZZ, polytope.space_dimension())
return LatticeEuclideanGroupElement(A, b)
if (self.n_vertices() == 1):
A = zero_matrix(ZZ, polytope.space_dimension(), self.space_dimension())
b = polytope_vertices[0]
return LatticeEuclideanGroupElement(A, b)
if (self.n_vertices() == 2):
self_origin = self_vertices[0]
self_ray = (self_vertices[1] - self_origin)
polytope_origin = polytope_vertices[0]
polytope_ray = (polytope_vertices[1] - polytope_origin)
(Ds, Us, Vs) = self_ray.column().smith_form()
(Dp, Up, Vp) = polytope_ray.column().smith_form()
assert (Vs.nrows() == Vs.ncols() == Vp.nrows() == Vp.ncols() == 1)
assert (abs(Vs[(0, 0)]) == abs(Vp[(0, 0)]) == 1)
A = zero_matrix(ZZ, Dp.nrows(), Ds.nrows())
A[(0, 0)] = 1
A = (((Up.inverse() * A) * Us) * (Vs[(0, 0)] * Vp[(0, 0)]))
b = (polytope_origin - (A * self_origin))
try:
A = matrix(ZZ, A)
b = vector(ZZ, b)
except TypeError:
raise LatticePolytopesNotIsomorphicError('different lattice')
hom = LatticeEuclideanGroupElement(A, b)
if (hom(self) == polytope):
return hom
raise LatticePolytopesNotIsomorphicError('different polygons')
def _find_cyclic_isomorphism_matching_edge(self, polytope, polytope_origin, p_ray_left, p_ray_right):
from sage.geometry.polyhedron.lattice_euclidean_group_element import LatticePolytopesNotIsomorphicError
polytope_matrix = block_matrix(1, 2, [p_ray_left.column(), p_ray_right.column()])
self_vertices = self.ordered_vertices()
for i in range(len(self_vertices)):
v_left = self_vertices[((i + 0) % len(self_vertices))]
v_origin = self_vertices[((i + 1) % len(self_vertices))]
v_right = self_vertices[((i + 2) % len(self_vertices))]
r_left = (v_left - v_origin)
r_right = (v_right - v_origin)
self_matrix = block_matrix(1, 2, [r_left.column(), r_right.column()])
A = self_matrix.solve_left(polytope_matrix)
b = (polytope_origin - (A * v_origin))
try:
A = matrix(ZZ, A)
b = vector(ZZ, b)
except TypeError:
continue
if (A.elementary_divisors()[0:2] != [1, 1]):
continue
hom = LatticeEuclideanGroupElement(A, b)
if (hom(self) == polytope):
return hom
raise LatticePolytopesNotIsomorphicError('different polygons')
def find_isomorphism(self, polytope):
from sage.geometry.polyhedron.lattice_euclidean_group_element import LatticePolytopesNotIsomorphicError
if (polytope.affine_dimension() != self.affine_dimension()):
raise LatticePolytopesNotIsomorphicError('different dimension')
polytope_vertices = polytope.vertices()
if (len(polytope_vertices) != self.n_vertices()):
raise LatticePolytopesNotIsomorphicError('different number of vertices')
self_vertices = self.ordered_vertices()
if (len(polytope.integral_points()) != len(self.integral_points())):
raise LatticePolytopesNotIsomorphicError('different number of integral points')
if (len(self_vertices) < 3):
return self._find_isomorphism_degenerate(polytope)
polytope_origin = polytope_vertices[0]
origin_P = C_Polyhedron(next(iter(polytope.minimized_generators())))
neighbors = []
for c in polytope.minimized_constraints():
if (not c.is_inequality()):
continue
if origin_P.relation_with(c).implies(Poly_Con_Relation.saturates()):
for (i, g) in enumerate(polytope.minimized_generators()):
if (i == 0):
continue
g = C_Polyhedron(g)
if g.relation_with(c).implies(Poly_Con_Relation.saturates()):
neighbors.append(polytope_vertices[i])
break
p_ray_left = (neighbors[0] - polytope_origin)
p_ray_right = (neighbors[1] - polytope_origin)
try:
return self._find_cyclic_isomorphism_matching_edge(polytope, polytope_origin, p_ray_left, p_ray_right)
except LatticePolytopesNotIsomorphicError:
pass
try:
return self._find_cyclic_isomorphism_matching_edge(polytope, polytope_origin, p_ray_right, p_ray_left)
except LatticePolytopesNotIsomorphicError:
pass
raise LatticePolytopesNotIsomorphicError('different polygons')
def is_isomorphic(self, polytope):
from sage.geometry.polyhedron.lattice_euclidean_group_element import LatticePolytopesNotIsomorphicError
try:
self.find_isomorphism(polytope)
return True
except LatticePolytopesNotIsomorphicError:
return False
def sub_polytopes(self):
subpolytopes = [self]
todo = list(subpolytopes)
while todo:
polytope = todo.pop()
for p in polytope.sub_polytope_generator():
if p.is_empty():
continue
if any((p.is_isomorphic(q) for q in subpolytopes)):
continue
subpolytopes.append(p)
todo.append(p)
return tuple(subpolytopes)
def plot(self):
from sage.plot.point import point2d
from sage.plot.polygon import polygon2d
vertices = self.ordered_vertices()
points = self.integral_points()
if (self.space_dimension() == 1):
vertices = [vector(ZZ, (v[0], 0)) for v in vertices]
points = [vector(ZZ, (p[0], 0)) for p in points]
point_plot = sum((point2d(p, pointsize=100, color='red') for p in points))
polygon_plot = polygon2d(vertices, alpha=0.2, color='green', zorder=(- 1), thickness=2)
return (polygon_plot + point_plot) |
def from_pandas_points_labels(df):
require = ['timestamp', 'label']
columns = df.columns.tolist()
if (not all(((x in columns) for x in require))):
raise KeyError('{} not found in columns: {}.'.format(require, columns))
df = df[(df['label'] == 1)]
return from_pandas_points(df) |
def get_features(data_dict):
users = data_dict.get('users', None)
items = data_dict.get('items', None)
timestamp_col = data_dict.get('timestamp_col', None)
ratings_col = data_dict.get('ratings_col', None)
features = [FeatureInfo(column=data_dict['user_col'], feature_hint=FeatureHint.QUERY_ID, feature_type=FeatureType.CATEGORICAL), FeatureInfo(column=data_dict['item_col'], feature_hint=FeatureHint.ITEM_ID, feature_type=FeatureType.CATEGORICAL, cardinality=data_dict['items_cardinality'])]
if timestamp_col:
features += [FeatureInfo(column=timestamp_col, feature_type=FeatureType.CATEGORICAL, feature_hint=FeatureHint.TIMESTAMP)]
if ratings_col:
features += [FeatureInfo(column=ratings_col, feature_type=FeatureType.NUMERICAL, feature_hint=FeatureHint.RATING)]
if (users is not None):
features += [FeatureInfo(column='gender', feature_type=FeatureType.CATEGORICAL)]
if (items is not None):
features += [FeatureInfo(column='category_id', feature_type=FeatureType.CATEGORICAL)]
return FeatureSchema(features) |
def getSMTPConnection():
try:
conn = smtplib.SMTP('smtp.gmail.com', 587)
conn.ehlo()
conn.starttls()
conn.ehlo()
conn.login('', 'mypassword')
except:
traceback.print_exc()
raise SMTPConnectionError
return conn |
@test_utils.test(arch=archs_support_ndarray_ad)
def test_ad_multiple_tapes():
N = 10
@ti.kernel
def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):
for i in a:
p[None] += ((a[i][0] * 2) + (a[i][1] * 3))
a = ti.ndarray(ti.math.vec2, shape=N, needs_grad=True)
p = ti.ndarray(ti.f32, shape=(), needs_grad=True)
init_val = 3
for i in range(N):
a[i] = [init_val, init_val]
with ti.ad.Tape(loss=p):
compute_sum(a, p)
assert (p[None] == ((N * (2 + 3)) * init_val))
for i in range(N):
assert (a.grad[i][0] == 2)
assert (a.grad[i][1] == 3)
a.grad.fill(0)
with ti.ad.Tape(loss=p):
compute_sum(a, p)
assert (p[None] == ((N * (2 + 3)) * init_val))
for i in range(N):
assert (a.grad[i][0] == 2)
assert (a.grad[i][1] == 3) |
def load_usps0():
(X_train, y_train, X_test, y_test) = load_usps()
selected = (y_train == 10)
y_train[selected] = 1
y_train[(~ selected)] = 0
selected = (y_test == 10)
y_test[selected] = 1
y_test[(~ selected)] = 0
return (X_train, y_train, X_test, y_test) |
class Task_Head(nn.Module):
def __init__(self, args, logger):
super(Task_Head, self).__init__()
self.args = args
self.logger = logger
self.cls_embed_layer = nn.Embedding(1, args.model_task_cls_segment_hidden_dim)
if (args.model_task_cls_time_pos_embed_type == 'absolute_learned_1D'):
from models.position_encoding import PositionEmbeddingAbsoluteLearned_1D
self.time_embed_layer = PositionEmbeddingAbsoluteLearned_1D(args.model_task_cls_max_time_ids_embed, args.model_task_cls_segment_hidden_dim)
else:
raise ValueError(f'not supported {self.args.model_task_cls_time_pos_embed_type}')
if (self.args.model_task_cls_head_name == 'downstream_transformer'):
self.long_term_model = TransformerEncoderLayer(args.model_task_cls_segment_hidden_dim, args.model_task_cls_tx_nhead, args.model_task_cls_tx_dim_feedforward, args.model_task_cls_tx_dropout, args.model_task_cls_tx_activation)
self.classifier = build_mlp(input_dim=args.model_task_cls_segment_hidden_dim, hidden_dims=[args.model_task_cls_classifier_hidden_dim], output_dim=args.model_task_cls_num_classes)
def forward(self, video_feats, video_mask):
B = video_feats.shape[0]
T = video_feats.shape[1]
device = self.args.device
CLS_id = torch.arange(1, device=device).repeat(B, 1)
CLS = self.cls_embed_layer(CLS_id)
if (self.args.model_task_cls_time_pos_embed_type == 'absolute_learned_1D'):
time_ids = torch.arange(1, (T + 1), device=device).repeat(B, 1)
time_seq = self.time_embed_layer(time_ids)
elif (self.args.model_task_cls_time_pos_embed_type == 'fixed_sinusoidal_1D'):
time_seq = self.time_embed_layer(T, device=device).unsqueeze(0).unsqueeze(0).repeat(B, N, J, 1, 1)
else:
raise ValueError(f'not supported {self.args.model_task_cls_time_pos_embed_type}')
if (self.args.model_task_cls_head_name == 'downstream_transformer'):
tx_updated_sequence = self.long_term_model(torch.cat([CLS, (video_feats + time_seq)], dim=1).transpose(0, 1), src_key_padding_mask=torch.cat([torch.zeros((B, 1)).bool().to(device), video_mask], dim=1))
fine_cls = tx_updated_sequence[0]
pred_logits = self.classifier(fine_cls)
elif (self.args.model_task_cls_head_name == 'downstream_mlp'):
pred_logits = self.classifier(torch.mean((video_feats + time_seq), dim=1))
else:
self.logger.info('The model_task_cls_head_name is not implemented!\nFunc: {}\nFile:{}'.format(__name__, __file__))
os._exit(0)
return pred_logits |
def get_evaluation_chunk_extra_data_key(evaluation_chunk_id):
return 'evaluation_chunks/{}_data.bytes'.format(evaluation_chunk_id) |
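# The key above is a flat object-store path, for example:
assert get_evaluation_chunk_extra_data_key(42) == 'evaluation_chunks/42_data.bytes' |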
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if (name == 'thisown'):
self.this.own(value)
elif (name == 'this'):
set(self, name, value)
elif (hasattr(self, name) and isinstance(getattr(type(self), name), property)):
set(self, name, value)
else:
raise AttributeError(('You cannot add instance attributes to %s' % self))
return set_instance_attr |
def instances2dict(imageFileList, verbose=False, dataset_name=None, rgb2id=None, input_image_size=None, mapillary_dataloading_style='OURS', debug=False):
imgCount = 0
instanceDict = {}
if (not isinstance(imageFileList, list)):
imageFileList = [imageFileList]
if verbose:
print('Processing {} images...'.format(len(imageFileList)))
for imageFileName in imageFileList:
img = Image.open(imageFileName)
if ('mapillary' in dataset_name):
if (mapillary_dataloading_style == 'DADA'):
raise NotImplementedError('To evaluate the mapillary on original image shape for panoptic seg, you need to first upsample the predicted masks with pad_with_fixed_AS(). This part is not implemented yet.')
else:
(img, new_image_shape) = resize_with_pad(img, [1024, 768], Image.NEAREST, pad_value=0, is_label=True)
imgNp = rgb2id(img).astype(np.uint32)
elif ('cityscapes' in dataset_name):
if debug:
img = img.resize((1024, 512), Image.NEAREST)
imgNp = np.array(img)
else:
raise NotImplementedError('no implementation found at def instances2dict(...) --> cityscapesscripts/evaluation/instances2dict.py')
instances = {}
for label in labels:
instances[label.name] = []
for instanceId in np.unique(imgNp):
instanceObj = Instance(imgNp, instanceId)
instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
imgKey = os.path.abspath(imageFileName)
instanceDict[imgKey] = instances
imgCount += 1
if verbose:
print('\rImages Processed: {}'.format(imgCount), end=' ')
sys.stdout.flush()
if verbose:
print('')
return instanceDict |
def generate_proposals(ann_file, tem_results_dir, pgm_proposals_dir, pgm_proposals_thread, **kwargs):
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = (num_videos // pgm_proposals_thread)
processes = []
manager = mp.Manager()
result_dict = manager.dict()
kwargs['result_dict'] = result_dict
for tid in range((pgm_proposals_thread - 1)):
tmp_video_list = range((tid * num_videos_per_thread), ((tid + 1) * num_videos_per_thread))
p = mp.Process(target=generate_candidate_proposals, args=(tmp_video_list, video_infos, tem_results_dir), kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range(((pgm_proposals_thread - 1) * num_videos_per_thread), num_videos)
p = mp.Process(target=generate_candidate_proposals, args=(tmp_video_list, video_infos, tem_results_dir), kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
os.makedirs(pgm_proposals_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
header = 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'
for video_name in result_dict:
proposals = result_dict[video_name]
proposal_path = osp.join(pgm_proposals_dir, (video_name + '.csv'))
np.savetxt(proposal_path, proposals, header=header, delimiter=',', comments='')
prog_bar.update() |
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
cfg = compat_cfg(cfg)
logger = get_root_logger(log_level=cfg.log_level)
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
if ('runner' not in cfg):
raise NotImplementedError("Please add runner in config, e.g., runner = dict(type='IterBasedRunner', max_iters=40000)")
train_dataloader_default_args = dict(samples_per_gpu=2, workers_per_gpu=2, num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, persistent_workers=False)
train_loader_cfg = {**train_dataloader_default_args, **cfg.data.get('train_dataloader', {})}
data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = build_ddp(model, cfg.device, device_ids=[int(os.environ['LOCAL_RANK'])], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
else:
model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
auto_scale_lr(cfg, distributed, logger)
optimizer = build_optimizer(model, cfg.optimizer)
if cfg.print_layer_wise_lr:
num_params_backbone = 0
num_params_decode_head = 0
num_params_instance_head = 0
named_param_list = list(model.named_parameters())
for i in range(len(optimizer.param_groups)):
layer_name = named_param_list[i][0]
layer_param1 = named_param_list[i][1]
layer_lr = optimizer.param_groups[i]['lr']
layer_param2 = optimizer.param_groups[i]['params'][0]
assert (layer_param1.shape == layer_param2.shape), 'params shape in named_param_list and optimizer.param_groups must match, there is some problem!'
if ('module.model.backbone' in layer_name):
num_params_backbone += layer_param2.numel()
elif ('module.model.decode_head' in layer_name):
num_params_decode_head += layer_param2.numel()
elif (('module.model.neck' in layer_name) or ('module.model.rpn_head' in layer_name) or ('module.model.roi_head' in layer_name)):
num_params_instance_head += layer_param2.numel()
logger.info(f'layer_name: {layer_name}, layer_lr: {layer_lr}, layer_param1.shape: {layer_param1.shape}, layer_param2.shape: {layer_param2.shape}, num_params: {layer_param2.numel()}')
print(num_params_backbone, num_params_decode_head, num_params_instance_head)
print(((num_params_backbone + num_params_decode_head) + num_params_instance_head))
runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
runner.timestamp = timestamp
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif (distributed and ('type' not in cfg.optimizer_config)):
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
if validate:
val_dataloader_default_args = dict(samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False, persistent_workers=False)
val_dataloader_args = {**val_dataloader_default_args, **cfg.data.get('val_dataloader', {})}
if (val_dataloader_args['samples_per_gpu'] > 1):
cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(val_dataset, **val_dataloader_args)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
eval_hook = EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
resume_from = None
if ((cfg.resume_from is None) and cfg.get('auto_resume')):
resume_from = find_latest_checkpoint(cfg.work_dir)
if (resume_from is not None):
cfg.resume_from = resume_from
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow) |
class Node():
balance = 0.5
def __init__(self, state, parent, action):
self.state = state
self.parent = parent
self.action = action
self.depth = 0
if (self.parent != None):
self.depth = (parent.depth + 1)
def getChildren(self):
children = []
for d in directions:
childState = self.state.clone()
crateMove = childState.update(d['x'], d['y'])
if ((childState.player['x'] == self.state.player['x']) and (childState.player['y'] == self.state.player['y'])):
continue
if (crateMove and childState.checkDeadlock()):
continue
children.append(Node(childState, self, d))
return children
def getKey(self):
return self.state.getKey()
def getCost(self):
return self.depth
def getHeuristic(self):
return self.state.getHeuristic()
def checkWin(self):
return self.state.checkWin()
def getActions(self):
actions = []
current = self
while (current.parent != None):
actions.insert(0, current.action)
current = current.parent
return actions
def __str__(self):
return ((((str(self.depth) + ',') + str(self.state.getHeuristic())) + '\n') + str(self.state))
def __lt__(self, other):
return ((self.getHeuristic() + (Node.balance * self.getCost())) < (other.getHeuristic() + (Node.balance * other.getCost()))) |
class FBTwoHopPathCache(FBCacheBase):
FILENAME = 'TwoHopPath.bin'
def query_two_hop_paths(self, entity):
if (not self.ready):
self.load()
if (entity in self.data):
return self.data[entity]
paths = get_2hop_relations(entity)[2]
paths = self.dataset_specific_prune(paths)
self.update_count += 1
self.data[entity] = paths
return paths
def dataset_specific_prune(self, two_hops):
if (self.DATASET == 'grail'):
two_hops = [(a, b) for (a, b) in two_hops if (legal_relation(a, self.DATASET) and legal_relation(b, self.DATASET))]
return two_hops
else:
return two_hops |
class FailToTypeCheck(CustomWarning):
def __init__(self):
super().__init__('File containing type errors!') |
@pytest.mark.parametrize('cv_result', [(1, True), (2, False), ('split', True), (KFold(5), False), (ShuffleSplit(1), True), (ShuffleSplit(2), False), (LeaveOneOut(), False)])
def test_check_no_agg_cv(cv_result: Tuple) -> None:
array = ['prefit', 'split']
(cv, result) = cv_result
np.testing.assert_almost_equal(check_no_agg_cv(X_toy, cv, array), result) |
def pad_to_batch(batch, w_to_ix, s_to_ix):
(history, current, slot, intent) = list(zip(*batch))
max_history = max([len(h) for h in history])
max_len = max([h.size(1) for h in flatten(history)])
max_current = max([c.size(1) for c in current])
max_slot = max([s.size(1) for s in slot])
(historys, currents, slots) = ([], [], [])
for i in range(len(batch)):
history_p_t = []
for j in range(len(history[i])):
if (history[i][j].size(1) < max_len):
history_p_t.append(torch.cat([history[i][j], torch.LongTensor(([w_to_ix['<pad>']] * (max_len - history[i][j].size(1)))).view(1, (- 1))], 1))
else:
history_p_t.append(history[i][j])
while (len(history_p_t) < max_history):
history_p_t.append(torch.LongTensor(([w_to_ix['<pad>']] * max_len)).view(1, (- 1)))
history_p_t = torch.cat(history_p_t)
historys.append(history_p_t)
if (current[i].size(1) < max_current):
currents.append(torch.cat([current[i], torch.LongTensor(([w_to_ix['<pad>']] * (max_current - current[i].size(1)))).view(1, (- 1))], 1))
else:
currents.append(current[i])
if (slot[i].size(1) < max_slot):
slots.append(torch.cat([slot[i], torch.LongTensor(([s_to_ix['<pad>']] * (max_slot - slot[i].size(1)))).view(1, (- 1))], 1))
else:
slots.append(slot[i])
currents = torch.cat(currents)
slots = torch.cat(slots)
intents = torch.cat(intent)
return (historys, currents, slots, intents) |
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'labels': ['array-like', None], 'pos_label': [str, numbers.Integral, None], 'average': [None, StrOptions({'binary', 'micro', 'macro', 'weighted', 'samples', 'multiclass'})], 'sample_weight': ['array-like', None], 'correction': [Interval(numbers.Real, 0, None, closed='left')]}, prefer_skip_nested_validation=True)
def geometric_mean_score(y_true, y_pred, *, labels=None, pos_label=1, average='multiclass', sample_weight=None, correction=0.0):
if ((average is None) or (average != 'multiclass')):
(sen, spe, _) = sensitivity_specificity_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('specificity', 'specificity'), sample_weight=sample_weight)
return np.sqrt((sen * spe))
else:
present_labels = unique_labels(y_true, y_pred)
if (labels is None):
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)])
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
tp = (y_true == y_pred)
tp_bins = y_true[tp]
if (sample_weight is not None):
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels))
else:
true_sum = tp_sum = np.zeros(len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
with np.errstate(divide='ignore', invalid='ignore'):
recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', None, 'recall')
recall[(recall == 0)] = correction
with np.errstate(divide='ignore', invalid='ignore'):
gmean = sp.stats.gmean(recall)
if isinstance(gmean, np.ma.core.MaskedConstant):
return 0.0
return gmean |
class TransposeType(ExplicitEnum):
NO = 'no'
SIMPLE = 'simple'
CONV1D = 'conv1d'
CONV2D = 'conv2d' |
class ParamNode(LeafNode):
def __init__(self, prod: Production):
if (not prod.is_param()):
raise ValueError('Cannot construct an AST param node from a non-param production')
super().__init__(prod)
@property
def index(self) -> int:
prod = cast(ParamProduction, self._prod)
return prod.rhs[0]
def children(self) -> List[Node]:
return []
def is_enum(self) -> bool:
return False
def is_param(self) -> bool:
return True
def to_sexp(self):
return [Symbol('@param'), self.index]
def deep_eq(self, other) -> bool:
if isinstance(other, ParamNode):
return (self.index == other.index)
return False
def deep_hash(self) -> int:
return hash(self.index)
def __repr__(self) -> str:
return 'ParamNode({})'.format(self.index)
def __str__(self) -> str:
return '{}'.format(self.index) |
class TransformedDataset(Dataset):
def __init__(self, dataset, transform=None, target_transform=None):
self.dataset = dataset
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
(img, label) = self.dataset[index]
label = (self.target_transform(label) if self.target_transform else label)
img = (self.transform(img) if self.transform else img)
return (img, label) |
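# Usage sketch for TransformedDataset above with a toy in-memory dataset; any
# indexable sequence of (sample, label) pairs works since only __len__ and
# __getitem__ of the wrapped dataset are used.
toy = [(0.0, 0), (1.0, 1), (2.0, 0)]
wrapped = TransformedDataset(toy, transform=lambda x: x * 10, target_transform=lambda y: 1 - y)
assert len(wrapped) == 3
assert wrapped[1] == (10.0, 0) |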
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.double_conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels))
self.identity = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0), nn.BatchNorm2d(out_channels))
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu((self.double_conv(x) + self.identity(x))) |
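# Shape-check sketch for DoubleConv above: the 3x3 convolutions use padding=1 and the
# 1x1 identity branch matches the channel count, so only the channel dimension changes.
import torch
block = DoubleConv(in_channels=3, out_channels=16)
x = torch.randn(2, 3, 32, 32)
assert block(x).shape == (2, 16, 32, 32) |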
def build_model(data, kernel_func=None):
variance = tf.math.reduce_variance(data.observations)
if (kernel_func is None):
kernel = gpflow.kernels.Matern52(variance=variance)
else:
kernel = kernel_func(variance)
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-05)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr) |
def div(field, variables=None):
variables = default_space_variables(variables)
n_var = len(variables)
field = list(field)
assert (len(field) == n_var)
out = 0
for (f_i, x_i) in zip(field, variables):
out += sp.sympify(f_i).diff(x_i)
return out |
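# Worked sketch of the divergence computed by div above, written out directly so the
# default_space_variables helper is not needed: for field = (x*y, y**2),
# div = d(x*y)/dx + d(y**2)/dy = y + 2*y = 3*y.
import sympy as sp
x, y = sp.symbols('x y')
out = sum(sp.sympify(f_i).diff(x_i) for f_i, x_i in zip([x * y, y ** 2], [x, y]))
assert sp.simplify(out - 3 * y) == 0 |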
def make_parser():
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--log', dest='log', default=None, help='one of [DEBUG, INFO, ERROR, WARNING, CRITICAL]')
parser.add_argument('--print-fastest-mirror', action='store_true', help='Print out the fastest mirror. All other arguments are ignored in that case.')
parser.add_argument('--quiet', action='store_true', help='Hide progress bar')
parser.add_argument('--timeout', type=float, default=None, help='Timeout for network operations')
parser.add_argument('--allow-upstream', action='store_true', help='Whether to fall back to downloading from the upstream URL')
parser.add_argument('url_or_tarball', type=str, nargs='?', default=None, help='A url or a tarball filename. In the latter case, the\n tarball is downloaded from the mirror network and its checksum\n is verified.')
parser.add_argument('destination', type=str, nargs='?', default=None, help='Where to write the file. If the destination is not specified, a url\n will be downloaded and the content written to stdout and a\n tarball will be saved under {SAGE_DISTFILES}'.format(SAGE_DISTFILES=SAGE_DISTFILES))
parser.add_argument('--no-check-certificate', action='store_true', help='Do not check SSL certificates for connections')
return parser |
@pytest.mark.parametrize('inshape', [(8, 2, 2, 2), (16, 1, 8)])
@pytest.mark.parametrize('n_outmaps', [16, 32])
@pytest.mark.parametrize('base_axis', [1, 2])
@pytest.mark.parametrize('w_init', [None, I.NormalInitializer(), True])
@pytest.mark.parametrize('b_init', [None, I.ConstantInitializer(), True])
@pytest.mark.parametrize('with_bias', [False, True])
@pytest.mark.parametrize('fix_parameters', [False, True])
@pytest.mark.parametrize('rng', [None, True])
def test_pf_affine_execution(g_rng, inshape, n_outmaps, base_axis, w_init, b_init, with_bias, fix_parameters, rng):
w_shape = (int(np.prod(inshape[base_axis:])), n_outmaps)
b_shape = (n_outmaps,)
w_init = process_param_init(w_init, w_shape, g_rng)
b_init = process_param_init(b_init, b_shape, g_rng)
rng = process_rng(rng)
kw = {}
insert_if_not_none(kw, 'w_init', w_init)
insert_if_not_none(kw, 'b_init', b_init)
insert_if_not_none(kw, 'rng', rng)
insert_if_not_default(kw, 'base_axis', base_axis, 1)
insert_if_not_default(kw, 'fix_parameters', fix_parameters, False)
insert_if_not_default(kw, 'with_bias', with_bias, True)
x = nn.Variable.from_numpy_array(g_rng.randn(*inshape))
y = PF.affine(x, n_outmaps, **kw)
y.forward()
y.backward()
assert (y.parent.info.type_name == 'Affine')
args = y.parent.info.args
assert (args['base_axis'] == base_axis)
assert (y.parent.inputs[0] == x)
assert (len(y.parent.inputs) == (2 + int(with_bias)))
assert (len(nn.get_parameters()) == (1 + int(with_bias)))
w = nn.get_parameters()['affine/W']
assert (w.shape == w_shape)
assert w.need_grad
assert (y.parent.inputs[1].need_grad == (not fix_parameters))
if isinstance(w_init, np.ndarray):
assert_allclose(w_init, w.d)
if with_bias:
b = nn.get_parameters()['affine/b']
assert (b.shape == b_shape)
assert b.need_grad
assert (y.parent.inputs[2].need_grad == (not fix_parameters))
if isinstance(b_init, np.ndarray):
assert_allclose(b_init, b.d) |
def ComputeNumSignBits(bitwidth, v):
size = v.size()
size1 = (size - 1)
sign = z3.Extract(size1, size1, v)
def rec(i):
if (i < 0):
return z3.BitVecVal(size, bitwidth)
return z3.If((z3.Extract(i, i, v) == sign), rec((i - 1)), z3.BitVecVal((size1 - i), bitwidth))
return rec((size - 2)) |
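# Usage sketch for ComputeNumSignBits above: for a concrete bitvector the nested If
# simplifies to the number of leading bits equal to the sign bit.
import z3
v = z3.BitVecVal(-1, 8)
assert z3.simplify(ComputeNumSignBits(8, v)).as_long() == 8
v2 = z3.BitVecVal(1, 8)
assert z3.simplify(ComputeNumSignBits(8, v2)).as_long() == 7 |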
class ConvReLU3d(_FusedModule):
def __init__(self, conv, relu):
assert ((type(conv) == Conv3d) and (type(relu) == ReLU)), 'Incorrect types for input modules{}{}'.format(type(conv), type(relu))
super().__init__(conv, relu) |
def create_model(bert_config, is_training, input_ids, input_mask, input_type_ids, labels, num_labels, use_one_hot_embeddings, tsa, unsup_ratio, global_step, num_train_steps):
num_sample = input_ids.shape[0].value
if is_training:
assert ((num_sample % (1 + (2 * unsup_ratio))) == 0)
sup_batch_size = (num_sample // (1 + (2 * unsup_ratio)))
unsup_batch_size = (sup_batch_size * unsup_ratio)
else:
sup_batch_size = num_sample
unsup_batch_size = 0
sequence = modeling.bert_model(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=input_type_ids, use_one_hot_embeddings=use_one_hot_embeddings, output_type='sequence')
clas_logits = hidden_to_logits(hidden=sequence, is_training=is_training, num_classes=num_labels, scope='classifier')
log_probs = tf.nn.log_softmax(clas_logits, axis=(- 1))
correct_label_probs = None
with tf.variable_scope('sup_loss'):
sup_log_probs = log_probs[:sup_batch_size]
tf.logging.info('%d', sup_batch_size)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
tgt_label_prob = one_hot_labels
per_example_loss = (- tf.reduce_sum((tgt_label_prob * sup_log_probs), axis=(- 1)))
loss_mask = tf.ones_like(per_example_loss, dtype=per_example_loss.dtype)
correct_label_probs = tf.reduce_sum((one_hot_labels * tf.exp(sup_log_probs)), axis=(- 1))
if tsa:
tsa_start = (1.0 / num_labels)
tsa_threshold = get_tsa_threshold(tsa, global_step, num_train_steps, tsa_start, end=1)
larger_than_threshold = tf.greater(correct_label_probs, tsa_threshold)
loss_mask = (loss_mask * (1 - tf.cast(larger_than_threshold, tf.float32)))
else:
tsa_threshold = 1
loss_mask = tf.stop_gradient(loss_mask)
per_example_loss = (per_example_loss * loss_mask)
sup_loss = (tf.reduce_sum(per_example_loss) / tf.maximum(tf.reduce_sum(loss_mask), 1))
unsup_loss_mask = None
if (is_training and (unsup_ratio > 0)):
with tf.variable_scope('unsup_loss'):
ori_start = sup_batch_size
ori_end = (ori_start + unsup_batch_size)
aug_start = (sup_batch_size + unsup_batch_size)
aug_end = (aug_start + unsup_batch_size)
ori_log_probs = log_probs[ori_start:ori_end]
aug_log_probs_before = log_probs[aug_start:aug_end]
(_, max_seq_length, hidden_dim) = clas_logits.get_shape().as_list()
aug_input_type_ids = input_type_ids[aug_start:aug_end]
vec = tf.to_int32(tf.expand_dims((np.arange(unsup_batch_size) * max_seq_length), (- 1)))
trans = tf.tile(vec, [1, max_seq_length])
aug_input_type_ids_trans = tf.reshape((aug_input_type_ids + trans), [(- 1)])
aug_input_type_ids_trans = tf.stop_gradient(aug_input_type_ids_trans)
aug_log_probs_middle = tf.reshape(aug_log_probs_before, [(- 1), hidden_dim])
aug_log_probs = tf.reshape(tf.gather(params=aug_log_probs_middle, indices=aug_input_type_ids_trans), [(- 1), max_seq_length, hidden_dim])
unsup_loss_mask = 1
tgt_aug_log_probs = tf.stop_gradient(aug_log_probs)
if (FLAGS.uda_confidence_thresh != (- 1)):
largest_prob = tf.reduce_max(tf.exp(ori_log_probs), axis=(- 1))
unsup_loss_mask = tf.cast(tf.greater(largest_prob, FLAGS.uda_confidence_thresh), tf.float32)
unsup_loss_mask = tf.stop_gradient(unsup_loss_mask)
per_example_kl_loss = (kl_for_log_probs(tgt_aug_log_probs, ori_log_probs) * unsup_loss_mask)
unsup_loss = tf.reduce_mean(per_example_kl_loss)
else:
unsup_loss = 0.0
return (sup_loss, unsup_loss, clas_logits[:sup_batch_size], per_example_loss, loss_mask, tsa_threshold, unsup_loss_mask, correct_label_probs) |
class Predict(Parameter):
def __init__(self, signature, **config):
self.stage = random.randbytes(8).hex()
self.signature = signature
self.config = config
self.reset()
if isinstance(signature, str):
(inputs, outputs) = signature.split('->')
(inputs, outputs) = (inputs.split(','), outputs.split(','))
(inputs, outputs) = ([field.strip() for field in inputs], [field.strip() for field in outputs])
assert all(((len(field.split()) == 1) for field in (inputs + outputs)))
inputs_ = ', '.join([f'`{field}`' for field in inputs])
outputs_ = ', '.join([f'`{field}`' for field in outputs])
instructions = f'Given the fields {inputs_}, produce the fields {outputs_}.'
inputs = {k: InputField() for k in inputs}
outputs = {k: OutputField() for k in outputs}
for (k, v) in inputs.items():
v.finalize(k, infer_prefix(k))
for (k, v) in outputs.items():
v.finalize(k, infer_prefix(k))
self.signature = dsp.Template(instructions, **inputs, **outputs)
def reset(self):
self.lm = None
self.traces = []
self.train = []
self.demos = []
def dump_state(self):
state_keys = ['lm', 'traces', 'train', 'demos']
return {k: getattr(self, k) for k in state_keys}
def load_state(self, state):
for (name, value) in state.items():
setattr(self, name, value)
import dspy
self.demos = [dspy.Example(**x) for x in self.demos]
def __call__(self, **kwargs):
return self.forward(**kwargs)
def forward(self, **kwargs):
signature = kwargs.pop('signature', self.signature)
demos = kwargs.pop('demos', self.demos)
config = dict(**self.config, **kwargs.pop('config', {}))
lm = (kwargs.pop('lm', self.lm) or dsp.settings.lm)
temperature = config.get('temperature', None)
temperature = (lm.kwargs['temperature'] if (temperature is None) else temperature)
num_generations = config.get('n', None)
num_generations = (lm.kwargs['n'] if (num_generations is None) else num_generations)
if (((temperature is None) or (temperature <= 0.15)) and (num_generations > 1)):
config['temperature'] = 0.7
x = dsp.Example(demos=demos, **kwargs)
if (self.lm is None):
(x, C) = dsp.generate(signature, **config)(x, stage=self.stage)
else:
with dsp.settings.context(lm=self.lm, query_only=True):
(x, C) = dsp.generate(signature, **config)(x, stage=self.stage)
completions = []
for c in C:
completions.append({})
for field in signature.fields:
if (field.output_variable not in kwargs.keys()):
completions[(- 1)][field.output_variable] = getattr(c, field.output_variable)
pred = Prediction.from_completions(completions, signature=signature)
if (dsp.settings.trace is not None):
trace = dsp.settings.trace
trace.append((self, {**kwargs}, pred))
return pred
def update_config(self, **kwargs):
self.config = {**self.config, **kwargs}
def get_config(self):
return self.config
def __repr__(self):
return f'{self.__class__.__name__}({self.signature})' |
class InfinitePolynomial_dense(InfinitePolynomial):
def __call__(self, *args, **kwargs):
for kw in kwargs:
value = kwargs[kw]
if isinstance(value, InfinitePolynomial):
kwargs[kw] = value._p
args = list(args)
for (i, arg) in enumerate(args):
if isinstance(arg, InfinitePolynomial):
args[i] = arg._p
self._p = self.parent().polynomial_ring()(self._p)
res = self._p(*args, **kwargs)
try:
return self.parent()(res)
except ValueError:
return res
def _richcmp_(self, x, op):
try:
self._p = self.parent()._P(self._p)
except Exception:
pass
try:
x._p = x.parent()._P(x._p)
except Exception:
pass
return richcmp(self._p, x._p, op)
def _add_(self, x):
P = self.parent()
self._p = P._P(self._p)
x._p = P._P(x._p)
return InfinitePolynomial_dense(self.parent(), (self._p + x._p))
def _mul_(self, x):
P = self.parent()
self._p = P._P(self._p)
x._p = P._P(x._p)
return InfinitePolynomial_dense(self.parent(), (self._p * x._p))
def _sub_(self, x):
P = self.parent()
self._p = P._P(self._p)
x._p = P._P(x._p)
return InfinitePolynomial_dense(self.parent(), (self._p - x._p))
def __pow__(self, n):
P = self.parent()
if callable(n):
if (self._p.parent() == self._p.base_ring()):
return self
if (not (hasattr(self._p, 'variables') and self._p.variables())):
return self
if (hasattr(n, 'to_cycles') and hasattr(n, '__len__')):
l = len(n)
def p(m):
return (n(m) if (0 < m <= l) else m)
else:
p = n
oldMax = P._max
newMax = max(([p(X) for X in range((oldMax + 1))] + [oldMax]))
if (newMax > P._max):
P.gen()[newMax]
self._p = P._P(self._p)
PP = P._P
PPgens = PP.gens()
newVars = []
sh = ((PP.ngens() // P.ngens()) - 1)
blocklength = sh
nM = (sh + 1)
for i in range(P.ngens()):
newVars.extend([PPgens[(sh - p(j))] for j in range(blocklength, (- 1), (- 1))])
sh += nM
mapR = PP.hom(newVars, PP)
return InfinitePolynomial_dense(self.parent(), mapR(self._p))
return InfinitePolynomial_dense(self.parent(), (self._p ** n)) |
def compile(source_code):
with compiler_lock:
return ROOT.gInterpreter.Declare(source_code) |
def run_translate(args):
logging.info('Running translator.')
time_limit = limits.get_time_limit(args.translate_time_limit, args.overall_time_limit)
memory_limit = limits.get_memory_limit(args.translate_memory_limit, args.overall_memory_limit)
translate = get_executable(args.build, REL_TRANSLATE_PATH)
assert sys.executable, 'Path to interpreter could not be found'
cmd = ((([sys.executable] + [translate]) + args.translate_inputs) + args.translate_options)
(stderr, returncode) = call.get_error_output_and_returncode('translator', cmd, time_limit=time_limit, memory_limit=memory_limit)
do_print_on_stderr = True
if (returncode == returncodes.TRANSLATE_OUT_OF_MEMORY):
output_related_to_memory_error = True
if (not stderr):
output_related_to_memory_error = False
for line in stderr.splitlines():
if ('MemoryError' not in line):
output_related_to_memory_error = False
break
if output_related_to_memory_error:
do_print_on_stderr = False
if (do_print_on_stderr and stderr):
returncodes.print_stderr(stderr)
if (returncode == 0):
return (0, True)
elif (returncode == 1):
return (returncodes.TRANSLATE_CRITICAL_ERROR, False)
else:
return (returncode, False) |
def rad_shifted(n, cutoff):
r0 = 0.5
rn = (cutoff - 1.0)
delta = ((rn - r0) / float((n - 1)))
sfs = [{'rad': {'cutoff': cutoff, 'eta': (0.5 / (delta ** 2)), 'mu': (r0 + (i * delta))}} for i in range(n)]
return (sfs, n, 0) |
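# Example of the radial symmetry-function grid produced by rad_shifted above: n Gaussian
# centers spaced evenly between 0.5 and cutoff - 1.0, with eta fixed by the spacing.
sfs, count, extra = rad_shifted(3, cutoff=6.0)
assert (count, extra) == (3, 0)
assert [sf['rad']['mu'] for sf in sfs] == [0.5, 2.75, 5.0] |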
def test():
array = ak.Array([[0, 1, 2, 3], [8, 9, 10, 11]], backend='typetracer')
other = ak.Array([1, 2], backend='cpu')
result = (array + other)
assert (ak.backend(result) == 'typetracer') |
class ImageDirectoryLoader():
def __init__(self, rootdir, pathspec=os.path.join('{source}', '{image_name}'), format='tiff', standardize=False):
self.rootdir = rootdir
self.pathspec = pathspec
self.format = format
self.standardize = standardize
def get(self, *args, **kwargs):
ext = ((self.pathspec.format(*args, **kwargs) + '.') + self.format)
path = os.path.join(self.rootdir, ext)
if (self.format == 'mrc'):
with open(path, 'rb') as f:
content = f.read()
(image, header, extended_header) = mrc.parse(content)
if self.standardize:
image = (image - header.amean)
image /= header.rms
else:
image = Image.open(path)
fp = image.fp
image.load()
fp.close()
image = np.array(image, copy=False)
if self.standardize:
image = ((image - image.mean()) / image.std())
return Image.fromarray(image) |
def _flat_nested_json_dict(json_dict, flatted, sep='.', start=''):
for (k, v) in json_dict.items():
if isinstance(v, dict):
_flat_nested_json_dict(v, flatted, sep, ((start + sep) + str(k)))
else:
flatted[((start + sep) + str(k))] = v |
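# Usage sketch for _flat_nested_json_dict above; note that with the default start=''
# every flattened key begins with the separator itself.
flatted = {}
_flat_nested_json_dict({'a': {'b': 1}, 'c': 2}, flatted)
assert flatted == {'.a.b': 1, '.c': 2} |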
class ParsimoniousAttack(object):
def __init__(self, model, args, **kwargs):
self.loss_func = args.loss_func
self.max_queries = args.max_queries
self.epsilon = args.epsilon
self.batch_size = args.batch_size
self.block_size = args.block_size
self.no_hier = args.no_hier
self.max_iters = args.max_iters
self.local_search = LocalSearchHelper(model, args)
def _perturb_image(self, image, noise):
adv_image = (image + noise)
adv_image = np.clip(adv_image, 0, 255)
return adv_image
def _split_block(self, upper_left, lower_right, block_size):
blocks = []
xs = np.arange(upper_left[0], lower_right[0], block_size)
ys = np.arange(upper_left[1], lower_right[1], block_size)
for (x, y) in itertools.product(xs, ys):
for c in range(3):
blocks.append([[x, y], [(x + block_size), (y + block_size)], c])
return blocks
def perturb(self, image, label, index, sess):
np.random.seed(index)
adv_image = np.copy(image)
num_queries = 0
block_size = self.block_size
upper_left = [0, 0]
lower_right = [32, 32]
blocks = self._split_block(upper_left, lower_right, block_size)
noise = ((- self.epsilon) * np.ones_like(image, dtype=np.int32))
num_blocks = len(blocks)
batch_size = (self.batch_size if (self.batch_size > 0) else num_blocks)
curr_order = np.random.permutation(num_blocks)
while True:
num_batches = int(math.ceil((num_blocks / batch_size)))
for i in range(num_batches):
bstart = (i * batch_size)
bend = min((bstart + batch_size), num_blocks)
blocks_batch = [blocks[curr_order[idx]] for idx in range(bstart, bend)]
(noise, queries, loss, success) = self.local_search.perturb(image, noise, label, sess, blocks_batch)
num_queries += queries
tf.logging.info('Block size: {}, batch: {}, loss: {:.4f}, num queries: {}'.format(block_size, i, loss, num_queries))
if (num_queries > self.max_queries):
return (adv_image, num_queries, False)
adv_image = self._perturb_image(image, noise)
if success:
return (adv_image, num_queries, True)
if ((not self.no_hier) and (block_size >= 2)):
block_size //= 2
blocks = self._split_block(upper_left, lower_right, block_size)
num_blocks = len(blocks)
batch_size = (self.batch_size if (self.batch_size > 0) else num_blocks)
curr_order = np.random.permutation(num_blocks)
else:
curr_order = np.random.permutation(num_blocks) |
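# Illustration only (a standalone rewrite, no TensorFlow model or session needed):
# perturb() tiles the 32x32 image into (block, channel) units and halves the block
# size at each hierarchy level, which quadruples the number of blocks to search.
import itertools
import numpy as np

def count_blocks(side=32, block_size=16, channels=3):
    xs = np.arange(0, side, block_size)
    ys = np.arange(0, side, block_size)
    return len(list(itertools.product(xs, ys))) * channels

for bs in (16, 8, 4, 2):
    print(bs, count_blocks(block_size=bs))  # 16 -> 12, 8 -> 48, 4 -> 192, 2 -> 768 |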
class PLBartTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['input_ids', 'attention_mask']
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', language_codes='base', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[(str, Any)]]=None, additional_special_tokens=None, **kwargs):
mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, language_codes=language_codes, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
src_lang = self._convert_lang_code_special_format(src_lang)
tgt_lang = self._convert_lang_code_special_format(tgt_lang)
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
self.language_codes = language_codes
fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]
self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
self.fairseq_offset = 1
self.sp_model_size = len(self.sp_model)
self.lang_code_to_id = {code: ((self.sp_model_size + i) + self.fairseq_offset) for (i, code) in enumerate(fairseq_language_codes)}
self.id_to_lang_code = {v: k for (k, v) in self.lang_code_to_id.items()}
if (self.language_codes == 'base'):
self.fairseq_tokens_to_ids['<mask>'] = ((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset)
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}
self._additional_special_tokens = list(self.lang_code_to_id.keys())
if (additional_special_tokens is not None):
self._additional_special_tokens.extend([t for t in additional_special_tokens if (t not in self._additional_special_tokens)])
if (self.language_codes == 'base'):
self._src_lang = src_lang
self.cur_lang_code_id = (self.lang_code_to_id[self._src_lang] if (self._src_lang is not None) else self._src_lang)
else:
self._src_lang = (src_lang if (src_lang is not None) else '__en_XX__')
self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
state['sp_model_proto'] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, d):
self.__dict__ = d
if (not hasattr(self, 'sp_model_kwargs')):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def vocab_size(self):
if (self.language_codes == 'base'):
return (((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset) + 1)
else:
return ((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset)
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
new_src_lang = self._convert_lang_code_special_format(new_src_lang)
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
prefix_ones = ([1] * len(self.prefix_tokens))
suffix_ones = ([1] * len(self.suffix_tokens))
if (token_ids_1 is None):
return ((prefix_ones + ([0] * len(token_ids_0))) + suffix_ones)
return (((prefix_ones + ([0] * len(token_ids_0))) + ([0] * len(token_ids_1))) + suffix_ones)
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return ((self.prefix_tokens + token_ids_0) + self.suffix_tokens)
return (((self.prefix_tokens + token_ids_0) + token_ids_1) + self.suffix_tokens)
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
if ((src_lang is None) or (tgt_lang is None)):
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
self.src_lang = self._convert_lang_code_special_format(src_lang)
self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang)
inputs['forced_bos_token_id'] = tgt_lang_id
return inputs
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
if (token in self.fairseq_tokens_to_ids):
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
return ((spm_id + self.fairseq_offset) if spm_id else self.unk_token_id)
def _convert_id_to_token(self, index):
if (index in self.fairseq_ids_to_tokens):
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece((index - self.fairseq_offset))
def convert_tokens_to_string(self, tokens):
out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
copyfile(self.vocab_file, out_vocab_file)
elif (not os.path.isfile(self.vocab_file)):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str='en_XX', tgt_texts: Optional[List[str]]=None, tgt_lang: str='python', **kwargs) -> BatchEncoding:
self.src_lang = self._convert_lang_code_special_format(src_lang)
self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
src_lang = self._convert_lang_code_special_format(src_lang)
self.cur_lang_code = (self.lang_code_to_id[src_lang] if (src_lang is not None) else None)
self.prefix_tokens = []
if (self.cur_lang_code is not None):
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, lang: str) -> None:
lang = self._convert_lang_code_special_format(lang)
self.cur_lang_code = (self.lang_code_to_id[lang] if (lang is not None) else None)
self.prefix_tokens = []
if (self.cur_lang_code is not None):
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.suffix_tokens = [self.eos_token_id]
def _convert_lang_code_special_format(self, lang: str) -> str:
lang = (FAIRSEQ_LANGUAGE_CODES_MAP[lang] if (lang in FAIRSEQ_LANGUAGE_CODES_MAP.keys()) else lang)
return lang |
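# Hedged usage sketch (assumes the `transformers` and `sentencepiece` packages and
# the `uclanlp/plbart-base` checkpoint are available; everything outside this file
# is an assumption, not taken from the source above).
from transformers import PLBartTokenizer

tok = PLBartTokenizer.from_pretrained("uclanlp/plbart-base",
                                      src_lang="en_XX", tgt_lang="python")
enc = tok("def add(a, b): return a + b", return_tensors="pt")
print(enc["input_ids"].shape)  # the source language code is appended as a suffix token |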
class _UtteranceExtractor(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self._indim = input_size
self._outdim = output_size
self.linear1 = nn.Linear(input_size, output_size)
self.linear2 = nn.Linear(output_size, output_size)
self.act_fn = nn.ReLU()
def input_size(self):
return self._indim
def output_size(self):
return self._outdim
def forward(self, x_BxH):
hid_BxH = self.linear1(x_BxH)
hid_BxH = self.act_fn(hid_BxH)
if self.training:
hid_BxH = self.linear2(hid_BxH)
hid_BxH = self.act_fn(hid_BxH)
return hid_BxH |
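# Minimal sketch exercising the extractor above (assumes only PyTorch); note that
# the second linear layer is applied only while the module is in training mode,
# so train() and eval() differ in depth but produce the same output shape.
import torch

extractor = _UtteranceExtractor(input_size=16, output_size=8)
x = torch.randn(4, 16)
extractor.train()
print(extractor(x).shape)  # torch.Size([4, 8])
extractor.eval()
print(extractor(x).shape)  # torch.Size([4, 8]) |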
def register_Ns3SpectrumSignalParameters_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::SpectrumSignalParameters const &', 'p')])
cls.add_method('Copy', 'ns3::Ptr< ns3::SpectrumSignalParameters >', [], is_virtual=True)
cls.add_instance_attribute('psd', 'ns3::Ptr< ns3::SpectrumValue >', is_const=False)
cls.add_instance_attribute('duration', 'ns3::Time', is_const=False)
cls.add_instance_attribute('txPhy', 'ns3::Ptr< ns3::SpectrumPhy >', is_const=False)
cls.add_instance_attribute('txAntenna', 'ns3::Ptr< ns3::AntennaModel >', is_const=False)
return |
def test_is_invertible_module():
X = torch.zeros(1, 10, 10, 10)
assert (not is_invertible_module(torch.nn.Conv2d(10, 10, kernel_size=(1, 1)), test_input_shape=X.shape))
fn = AdditiveCoupling(SubModule(), implementation_bwd=(- 1), implementation_fwd=(- 1))
assert is_invertible_module(fn, test_input_shape=X.shape)
class FakeInverse(torch.nn.Module):
def forward(self, x):
return (x * 4)
def inverse(self, y):
return (y * 8)
assert (not is_invertible_module(FakeInverse(), test_input_shape=X.shape)) |
class DeltaActionEnvWrapper(gym.ActionWrapper):
def __init__(self, env):
super(DeltaActionEnvWrapper, self).__init__(env)
self.env.add_wrapper_info({'delta_action': dict()})
def action(self, action):
if (self.env.get_action_mode() == 'joint_positions'):
offset = self.env.get_robot().get_last_applied_joint_positions()
elif (self.env.get_action_mode() == 'joint_torques'):
offset = self.env.get_robot().get_latest_full_state()['torques']
elif (self.env.get_action_mode() == 'end_effector_positions'):
offset = self.env.get_robot().get_latest_full_state()['end_effector_positions']
else:
raise Exception('action mode is not known')
if self.env.are_actions_normalized():
offset = self.env.get_robot().normalize_observation_for_key(observation=offset, key=self.env.get_action_mode())
return (action + offset)
def reverse_action(self, action):
if (self.env.get_action_mode() == 'joint_positions'):
offset = self.env.get_robot().get_last_applied_joint_positions()
elif (self.env.get_action_mode() == 'joint_torques'):
offset = self.env.get_robot().get_latest_full_state()['torques']
elif (self.env.get_action_mode() == 'end_effector_positions'):
offset = self.env.get_robot().get_latest_full_state()['end_effector_positions']
else:
raise Exception('action mode is not known')
if self.env.are_actions_normalized():
offset = self.env.get_robot().normalize_observation_for_key(observation=offset, key=self.env.get_action_mode())
return (action - offset) |
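# Illustrative arithmetic only (no CausalWorld environment required; the arrays are
# hypothetical): the wrapper treats the policy's action as a delta added to the most
# recent absolute command, and reverse_action undoes that shift.
import numpy as np

last_applied = np.array([0.10, -0.20, 0.05])   # hypothetical previous joint command
delta = np.array([0.01, 0.02, -0.01])          # action coming from the policy
absolute = delta + last_applied                # what DeltaActionEnvWrapper.action computes
recovered = absolute - last_applied            # what reverse_action computes
assert np.allclose(recovered, delta) |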