@pytest.mark.unit()
@pytest.mark.parametrize(('session', 'path', 'node_info', 'expected'), [pytest.param(Session.from_config({'check_casing_of_paths': False, 'paths': (Path.cwd(),)}), Path(), NodeInfo(arg_name='', path=(), value=(Path.cwd() / 'text.txt'), task_path=(Path.cwd() / 'task_example.py'), task_name='task_example'), (Path.cwd() / 'text.txt'), id='test with absolute string path'), pytest.param(Session.from_config({'check_casing_of_paths': False, 'paths': (Path.cwd(),)}), Path(), NodeInfo(arg_name='', path=(), value=1, task_path=(Path.cwd() / 'task_example.py'), task_name='task_example'), '1', id='test with python node')])
def test_pytask_collect_node(session, path, node_info, expected):
result = pytask_collect_node(session, path, node_info)
assert (str(result.load()) == str(expected)) |
def test_bpe_sentence_embedding():
assert (BPESentenceEmbedding(Laser.DEFAULT_ENCODER_FILE).embed_bpe_sentences(['hello', 'world']).shape == (2, 1024))
with open(Laser.DEFAULT_ENCODER_FILE, 'rb') as encoder_f:
assert (BPESentenceEmbedding(encoder_f).embed_bpe_sentences(['hello', 'world']).shape == (2, 1024)) |
def find_identifier(business_logic, query, name_ok=True):
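    # Pop exactly one of 'id', 'slug' or 'name' from the query (last value wins) and resolve it to an id.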
name = slug = identifier = None
if ('id' in query):
identifier = query.pop('id')[(- 1)]
elif ('slug' in query):
slug = query.pop('slug')[(- 1)]
elif (name_ok and ('name' in query)):
name = query.pop('name')[(- 1)]
if (not (name or slug or identifier)):
if name_ok:
raise Exception('Must specify id, slug, or name')
else:
raise Exception('Must specify id or slug')
query['id'] = business_logic.find_identifier(name, slug, identifier) |
def test_read_commandline(dataframe):
temp_dir = tempfile.gettempdir()
dataframe.to_csv(f'{temp_dir}/dataframe.csv', index=0)
if (sys.platform in ['win32']):
        df = janitor.io.read_commandline(f'type {temp_dir}\\dataframe.csv')
else:
df = janitor.io.read_commandline(f'cat {temp_dir}/dataframe.csv')
assert df.equals(dataframe)
os.unlink(f'{temp_dir}/dataframe.csv') |
def test_multilabel_independent():
    edges = np.zeros((0, 2), dtype=int)
n_features = 5
n_labels = 4
model = MultiLabelClf(n_labels=n_labels, n_features=n_features, edges=edges)
rnd = np.random.RandomState(0)
x = rnd.normal(size=5)
w = rnd.normal(size=(n_features * n_labels))
y = model.inference(x, w)
y_ = (np.dot(w.reshape(n_labels, n_features), x) > 0)
assert_array_equal(y, y_)
joint_feature = model.joint_feature(x, y)
energy = compute_energy(model._get_unary_potentials(x, w), model._get_pairwise_potentials(x, w), edges, y)
assert_almost_equal(energy, np.dot(joint_feature, w))
y_continuous = np.zeros((n_labels, 2))
y_continuous[(np.arange(n_labels), y)] = 1
assert_array_almost_equal(joint_feature, model.joint_feature(x, (y_continuous, np.zeros((0, n_labels, n_labels))))) |
class LstmEncoder(nn.Module):
def __init__(self, args):
super(LstmEncoder, self).__init__()
self.bidirectional = args.bidirectional
if self.bidirectional:
assert ((args.hidden_size % 2) == 0)
self.hidden_size = (args.hidden_size // 2)
else:
self.hidden_size = args.hidden_size
self.layers_num = args.layers_num
self.rnn = nn.LSTM(input_size=args.emb_size, hidden_size=self.hidden_size, num_layers=args.layers_num, dropout=args.dropout, batch_first=True, bidirectional=self.bidirectional)
self.drop = nn.Dropout(args.dropout)
def forward(self, emb, seg):
hidden = self.init_hidden(emb.size(0), emb.device)
(output, hidden) = self.rnn(emb, hidden)
output = self.drop(output)
return output
def init_hidden(self, batch_size, device):
if self.bidirectional:
return (torch.zeros((self.layers_num * 2), batch_size, self.hidden_size, device=device), torch.zeros((self.layers_num * 2), batch_size, self.hidden_size, device=device))
else:
return (torch.zeros(self.layers_num, batch_size, self.hidden_size, device=device), torch.zeros(self.layers_num, batch_size, self.hidden_size, device=device)) |
class SMPHandler():
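    # State machine for OTR's Socialist Millionaires' Protocol (SMP): both parties
    # learn whether their secrets match without revealing the secrets themselves.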
def __init__(self, crypto):
self.crypto = crypto
self.state = 1
self.g1 = DH_GENERATOR
self.g2 = None
self.g3 = None
self.g3o = None
self.x2 = None
self.x3 = None
self.prog = SMPPROG_OK
self.pab = None
self.qab = None
self.questionReceived = False
self.secret = None
self.p = None
self.q = None
def abort(self, appdata=None):
self.state = 1
self.sendTLV(proto.SMPABORTTLV(), appdata=appdata)
def sendTLV(self, tlv, appdata=None):
self.crypto.ctx.sendInternal(b'', tlvs=[tlv], appdata=appdata)
def handle(self, tlv, appdata=None):
logger.debug('handling TLV {0.__class__.__name__}'.format(tlv))
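        # Assume cheating by default; only a fully verified message resets prog to OK.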
self.prog = SMPPROG_CHEATED
if isinstance(tlv, proto.SMPABORTTLV):
self.state = 1
return
is1qTlv = isinstance(tlv, proto.SMP1QTLV)
if (isinstance(tlv, proto.SMP1TLV) or is1qTlv):
if (self.state != 1):
self.abort(appdata=appdata)
return
msg = tlv.mpis
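            # Reject the message unless group membership, exponent ranges and the knowledge-of-log proofs all check out.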
if ((not check_group(msg[0])) or (not check_group(msg[3])) or (not check_exp(msg[2])) or (not check_exp(msg[5])) or (not check_known_log(msg[1], msg[2], self.g1, msg[0], 1)) or (not check_known_log(msg[4], msg[5], self.g1, msg[3], 2))):
logger.error('invalid SMP1TLV received')
self.abort(appdata=appdata)
return
self.questionReceived = is1qTlv
self.g3o = msg[3]
self.x2 = randrange(2, DH_MAX)
self.x3 = randrange(2, DH_MAX)
self.g2 = pow(msg[0], self.x2, DH_MODULUS)
self.g3 = pow(msg[3], self.x3, DH_MODULUS)
self.prog = SMPPROG_OK
self.state = 0
return
if isinstance(tlv, proto.SMP2TLV):
if (self.state != 2):
self.abort(appdata=appdata)
return
msg = tlv.mpis
mp = msg[6]
mq = msg[7]
if ((not check_group(msg[0])) or (not check_group(msg[3])) or (not check_group(msg[6])) or (not check_group(msg[7])) or (not check_exp(msg[2])) or (not check_exp(msg[5])) or (not check_exp(msg[9])) or (not check_exp(msg[10])) or (not check_known_log(msg[1], msg[2], self.g1, msg[0], 3)) or (not check_known_log(msg[4], msg[5], self.g1, msg[3], 4))):
logger.error('invalid SMP2TLV received')
self.abort(appdata=appdata)
return
self.g3o = msg[3]
self.g2 = pow(msg[0], self.x2, DH_MODULUS)
self.g3 = pow(msg[3], self.x3, DH_MODULUS)
if (not self.check_equal_coords(msg[6:11], 5)):
logger.error('invalid SMP2TLV received')
self.abort(appdata=appdata)
return
r = randrange(2, DH_MAX)
self.p = pow(self.g3, r, DH_MODULUS)
msg = [self.p]
qa1 = pow(self.g1, r, DH_MODULUS)
qa2 = pow(self.g2, self.secret, DH_MODULUS)
self.q = ((qa1 * qa2) % DH_MODULUS)
msg.append(self.q)
msg += self.proof_equal_coords(r, 6)
inv = invMod(mp)
self.pab = ((self.p * inv) % DH_MODULUS)
inv = invMod(mq)
self.qab = ((self.q * inv) % DH_MODULUS)
msg.append(pow(self.qab, self.x3, DH_MODULUS))
msg += self.proof_equal_logs(7)
self.state = 4
self.prog = SMPPROG_OK
self.sendTLV(proto.SMP3TLV(msg), appdata=appdata)
return
if isinstance(tlv, proto.SMP3TLV):
if (self.state != 3):
self.abort(appdata=appdata)
return
msg = tlv.mpis
if ((not check_group(msg[0])) or (not check_group(msg[1])) or (not check_group(msg[5])) or (not check_exp(msg[3])) or (not check_exp(msg[4])) or (not check_exp(msg[7])) or (not self.check_equal_coords(msg[:5], 6))):
logger.error('invalid SMP3TLV received')
self.abort(appdata=appdata)
return
inv = invMod(self.p)
self.pab = ((msg[0] * inv) % DH_MODULUS)
inv = invMod(self.q)
self.qab = ((msg[1] * inv) % DH_MODULUS)
if (not self.check_equal_logs(msg[5:8], 7)):
logger.error('invalid SMP3TLV received')
self.abort(appdata=appdata)
return
md = msg[5]
msg = [pow(self.qab, self.x3, DH_MODULUS)]
msg += self.proof_equal_logs(8)
rab = pow(md, self.x3, DH_MODULUS)
self.prog = (SMPPROG_SUCCEEDED if (self.pab == rab) else SMPPROG_FAILED)
if (self.prog != SMPPROG_SUCCEEDED):
logger.error("secrets don't match")
self.abort(appdata=appdata)
self.crypto.ctx.setCurrentTrust('')
return
logger.info('secrets matched')
if (not self.questionReceived):
self.crypto.ctx.setCurrentTrust('smp')
self.state = 1
self.sendTLV(proto.SMP4TLV(msg), appdata=appdata)
return
if isinstance(tlv, proto.SMP4TLV):
if (self.state != 4):
self.abort(appdata=appdata)
return
msg = tlv.mpis
if ((not check_group(msg[0])) or (not check_exp(msg[2])) or (not self.check_equal_logs(msg[:3], 8))):
logger.error('invalid SMP4TLV received')
self.abort(appdata=appdata)
return
rab = pow(msg[0], self.x3, DH_MODULUS)
self.prog = (SMPPROG_SUCCEEDED if (self.pab == rab) else SMPPROG_FAILED)
if (self.prog != SMPPROG_SUCCEEDED):
logger.error("secrets don't match")
self.abort(appdata=appdata)
self.crypto.ctx.setCurrentTrust('')
return
logger.info('secrets matched')
self.crypto.ctx.setCurrentTrust('smp')
self.state = 1
return
def gotSecret(self, secret, question=None, appdata=None):
ourFP = self.crypto.ctx.user.getPrivkey().fingerprint()
if (self.state == 1):
combSecret = SHA256(((((b'\x01' + ourFP) + self.crypto.theirPubkey.fingerprint()) + self.crypto.sessionId) + secret))
self.secret = bytes_to_long(combSecret)
self.x2 = randrange(2, DH_MAX)
self.x3 = randrange(2, DH_MAX)
msg = [pow(self.g1, self.x2, DH_MODULUS)]
msg += proof_known_log(self.g1, self.x2, 1)
msg.append(pow(self.g1, self.x3, DH_MODULUS))
msg += proof_known_log(self.g1, self.x3, 2)
self.prog = SMPPROG_OK
self.state = 2
if (question is None):
self.sendTLV(proto.SMP1TLV(msg), appdata=appdata)
else:
self.sendTLV(proto.SMP1QTLV(question, msg), appdata=appdata)
if (self.state == 0):
combSecret = SHA256(((((b'\x01' + self.crypto.theirPubkey.fingerprint()) + ourFP) + self.crypto.sessionId) + secret))
self.secret = bytes_to_long(combSecret)
msg = [pow(self.g1, self.x2, DH_MODULUS)]
msg += proof_known_log(self.g1, self.x2, 3)
msg.append(pow(self.g1, self.x3, DH_MODULUS))
msg += proof_known_log(self.g1, self.x3, 4)
r = randrange(2, DH_MAX)
self.p = pow(self.g3, r, DH_MODULUS)
msg.append(self.p)
qb1 = pow(self.g1, r, DH_MODULUS)
qb2 = pow(self.g2, self.secret, DH_MODULUS)
self.q = ((qb1 * qb2) % DH_MODULUS)
msg.append(self.q)
msg += self.proof_equal_coords(r, 5)
self.state = 3
self.sendTLV(proto.SMP2TLV(msg), appdata=appdata)
def proof_equal_coords(self, r, v):
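        # Fiat-Shamir style zero-knowledge proof that p and q were built from the same blinding exponent r and this side's secret.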
r1 = randrange(2, DH_MAX)
r2 = randrange(2, DH_MAX)
temp2 = ((pow(self.g1, r1, DH_MODULUS) * pow(self.g2, r2, DH_MODULUS)) % DH_MODULUS)
temp1 = pow(self.g3, r1, DH_MODULUS)
cb = SHA256(((struct.pack(b'B', v) + pack_mpi(temp1)) + pack_mpi(temp2)))
c = bytes_to_long(cb)
temp1 = ((r * c) % SM_ORDER)
d1 = ((r1 - temp1) % SM_ORDER)
temp1 = ((self.secret * c) % SM_ORDER)
d2 = ((r2 - temp1) % SM_ORDER)
return (c, d1, d2)
def check_equal_coords(self, coords, v):
(p, q, c, d1, d2) = coords
temp1 = ((pow(self.g3, d1, DH_MODULUS) * pow(p, c, DH_MODULUS)) % DH_MODULUS)
temp2 = (((pow(self.g1, d1, DH_MODULUS) * pow(self.g2, d2, DH_MODULUS)) * pow(q, c, DH_MODULUS)) % DH_MODULUS)
cprime = SHA256(((struct.pack(b'B', v) + pack_mpi(temp1)) + pack_mpi(temp2)))
return (long_to_bytes(c, 32) == cprime)
def proof_equal_logs(self, v):
r = randrange(2, DH_MAX)
temp1 = pow(self.g1, r, DH_MODULUS)
temp2 = pow(self.qab, r, DH_MODULUS)
cb = SHA256(((struct.pack(b'B', v) + pack_mpi(temp1)) + pack_mpi(temp2)))
c = bytes_to_long(cb)
temp1 = ((self.x3 * c) % SM_ORDER)
d = ((r - temp1) % SM_ORDER)
return (c, d)
def check_equal_logs(self, logs, v):
(r, c, d) = logs
temp1 = ((pow(self.g1, d, DH_MODULUS) * pow(self.g3o, c, DH_MODULUS)) % DH_MODULUS)
temp2 = ((pow(self.qab, d, DH_MODULUS) * pow(r, c, DH_MODULUS)) % DH_MODULUS)
cprime = SHA256(((struct.pack(b'B', v) + pack_mpi(temp1)) + pack_mpi(temp2)))
return (long_to_bytes(c, 32) == cprime) |
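# Best-effort numeric coercion: floats for dotted strings, ints otherwise, falling back to the raw string.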
def _num_type(value):
    try:
        return (float(value) if ('.' in value) else int(value))
    except ValueError:
        return value |
class TestGetInputFocus(EndianTest):
def setUp(self):
self.req_args_0 = {}
self.req_bin_0 = b'+\x00\x00\x01'
        self.reply_args_0 = {'focus': 864688157, 'revert_to': 153, 'sequence_number': 4228}  # focus window id decoded from bytes 8-11 of reply_bin_0
self.reply_bin_0 = b'\x01\x99\x10\x84\x00\x00\x00\x003\x8a\x18\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def testPackRequest0(self):
bin = request.GetInputFocus._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.GetInputFocus._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0)
def testPackReply0(self):
bin = request.GetInputFocus._reply.to_binary(*(), **self.reply_args_0)
self.assertBinaryEqual(bin, self.reply_bin_0)
def testUnpackReply0(self):
(args, remain) = request.GetInputFocus._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.reply_args_0) |
def dependentSchemas(validator, dependentSchemas, instance, schema):
if (not validator.is_type(instance, 'object')):
return
for (property, dependency) in dependentSchemas.items():
if (property not in instance):
continue
(yield from validator.descend(instance, dependency, schema_path=property)) |
class AggregatedTransform(TransformComponent):
def __init__(self, functions: List[Function], filter_expression: str=None):
super(AggregatedTransform, self).__init__()
self.functions = functions
self.filter_expression = filter_expression
    @property
    def aggregations(self) -> List[Tuple]:
column_name = (self._parent.from_column or self._parent.name)
expression = (when(expr(self.filter_expression), col(column_name)) if self.filter_expression else column_name)
Function = namedtuple('Function', ['function', 'data_type'])
return [Function(f.func(expression), f.data_type.spark) for f in self.functions]
def _get_output_name(self, function: object) -> str:
if (not hasattr(function, '__name__')):
feature_name = self._parent.name
raise AttributeError(f'''Anonymous functions are not supported on AggregatedTransform.
Check feature {feature_name} transforms.
''')
base_name = '__'.join([self._parent.name, str(function.__name__).lower()])
return base_name
    @property
    def output_columns(self) -> List[str]:
return [self._get_output_name(f.func) for f in self.functions]
def transform(self, dataframe: DataFrame) -> DataFrame:
raise NotImplementedError("AggregatedTransform won't be used outside an AggregatedFeatureSet, meaning the responsibility of aggregating and apply the transformation is now over the FeatureSet component. This should optimize the ETL process.") |
def get_scheduler(optimizer, n_epochs: int, loss_name=None):
    milestones_by_max_epochs = [(20, [10, 15]), (30, [15, 25]), (40, [20, 30]), (50, [25, 40]), (60, [30, 50]), (70, [40, 60]), (80, [30, 50, 70]), (120, [40, 80, 100]), (160, [40, 80, 120, 140])]
    for (limit, milestones) in milestones_by_max_epochs:
        if (n_epochs <= limit):
            return MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
    return MultiStepLR(optimizer, milestones=[60, 100, 140, 180], gamma=0.1) |
def setup(loop, args):
def verbose(s):
if (args.v >= 2):
sys.stdout.write((('\x1b[32m' + time.strftime('%Y-%m-%d %H:%M:%S')) + '\x1b[m '))
sys.stdout.write((s + '\x1b[0K\n'))
else:
sys.stdout.write((s + '\n'))
sys.stdout.flush()
args.verbose = verbose
args.stats = {0: ([0] * 6)}
def modstat(user, remote_ip, host_name, stats=args.stats):
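        # Curried counter update: modstat(...)(i)(s) adds s to slot i of both the global row and the per-'user:ip'/host row.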
u = ((user.decode().split(':')[0] + ':') if isinstance(user, (bytes, bytearray)) else '')
host_name_2 = ('.'.join(host_name.split('.')[((- 3) if host_name.endswith('.com.cn') else (- 2)):]) if host_name.split('.')[(- 1)].isalpha() else host_name)
tostat = (stats[0], stats.setdefault((u + remote_ip), {}).setdefault(host_name_2, ([0] * 6)))
return (lambda i: (lambda s: [st.__setitem__(i, (st[i] + s)) for st in tostat]))
args.modstat = modstat
    def win_readline(handler):
        while True:
            line = sys.stdin.readline()
            if (not line):
                break
            handler()
if (args.v >= 2):
asyncio.ensure_future(realtime_stat(args.stats[0]))
if (sys.platform != 'win32'):
loop.add_reader(sys.stdin, functools.partial(all_stat_other, args.stats))
else:
loop.run_in_executor(None, win_readline, functools.partial(all_stat, args.stats)) |
class BaseEmbed(Seeder, metaclass=ABCMeta):
def __init__(self, options) -> None:
super().__init__(options, enabled=(options.no_seed is False))
self.download = options.download
self.extra_search_dir = [i.resolve() for i in options.extra_search_dir if i.exists()]
self.pip_version = options.pip
self.setuptools_version = options.setuptools
self.wheel_version = options.wheel
self.no_pip = options.no_pip
self.no_setuptools = options.no_setuptools
self.no_wheel = options.no_wheel
self.app_data = options.app_data
self.periodic_update = (not options.no_periodic_update)
if (not self.distribution_to_versions()):
self.enabled = False
    @classmethod
    def distributions(cls) -> dict[(str, Version)]:
return {'pip': Version.bundle, 'setuptools': Version.bundle, 'wheel': Version.bundle}
def distribution_to_versions(self) -> dict[(str, str)]:
return {distribution: getattr(self, f'{distribution}_version') for distribution in self.distributions() if ((getattr(self, f'no_{distribution}') is False) and (getattr(self, f'{distribution}_version') != 'none'))}
    @classmethod
    def add_parser_arguments(cls, parser, interpreter, app_data):
group = parser.add_mutually_exclusive_group()
group.add_argument('--no-download', '--never-download', dest='download', action='store_false', help=f"pass to disable download of the latest {'/'.join(cls.distributions())} from PyPI", default=True)
group.add_argument('--download', dest='download', action='store_true', help=f"pass to enable download of the latest {'/'.join(cls.distributions())} from PyPI", default=False)
parser.add_argument('--extra-search-dir', metavar='d', type=Path, nargs='+', help='a path containing wheels to extend the internal wheel list (can be set 1+ times)', default=[])
for (distribution, default) in cls.distributions().items():
if ((interpreter.version_info[:2] >= (3, 12)) and (distribution in {'wheel', 'setuptools'})):
default = 'none'
parser.add_argument(f'--{distribution}', dest=distribution, metavar='version', help=f'version of {distribution} to install as seed: embed, bundle, none or exact version', default=default)
for distribution in cls.distributions():
parser.add_argument(f'--no-{distribution}', dest=f'no_{distribution}', action='store_true', help=f'do not install {distribution}', default=False)
parser.add_argument('--no-periodic-update', dest='no_periodic_update', action='store_true', help='disable the periodic (once every 14 days) update of the embedded wheels', default=(not PERIODIC_UPDATE_ON_BY_DEFAULT))
def __repr__(self) -> str:
result = self.__class__.__name__
result += '('
if self.extra_search_dir:
result += f"extra_search_dir={', '.join((str(i) for i in self.extra_search_dir))},"
result += f'download={self.download},'
for distribution in self.distributions():
if getattr(self, f'no_{distribution}'):
continue
version = getattr(self, f'{distribution}_version', None)
if (version == 'none'):
continue
ver = f"={(version or 'latest')}"
result += f' {distribution}{ver},'
return (result[:(- 1)] + ')') |
class _TestStateful():
def state_dict(self) -> Dict[(str, Any)]:
return {'foo': torch.Tensor(1), 'bar': torch.Tensor(1), 'baz': [torch.Tensor(1), torch.Tensor(1)], 'qux': {'quux': torch.Tensor(1), 'quuz': torch.Tensor(1)}}
def load_state_dict(self, state_dict: Dict[(str, Any)]) -> None:
raise NotImplementedError() |
def test_async_cmds_overwrite_vs_append(temp_dir):
stdout = temp_dir.joinpath('mydir/stdout')
stderr = temp_dir.joinpath('mydir/stderr')
cmd1 = get_cmd('tests/testfiles/cmds/echo-out-and-err.sh one', 'tests\\testfiles\\cmds\\echo-out-and-err.bat one')
context = Context({'cmds': {'run': [cmd1], 'stdout': stdout, 'stderr': stderr}})
pypyr.steps.cmds.run_step(context)
assert ('cmdOut' not in context)
assert (stdout.read_text() == 'stdout one\n')
assert (stderr.read_text() == 'stderr one\n')
cmd2 = get_cmd('tests/testfiles/cmds/echo-out-and-err.sh two', 'tests\\testfiles\\cmds\\echo-out-and-err.bat two')
context = Context({'cmds': {'run': [cmd2], 'stdout': stdout, 'stderr': stderr}})
pypyr.steps.cmds.run_step(context)
assert (stdout.read_text() == 'stdout two\n')
assert (stderr.read_text() == 'stderr two\n')
cmd3 = get_cmd('tests/testfiles/cmds/echo-out-and-err.sh three', 'tests\\testfiles\\cmds\\echo-out-and-err.bat three')
context = Context({'cmds': [{'run': [cmd3], 'stdout': stdout, 'stderr': stderr, 'append': True}]})
pypyr.steps.cmds.run_step(context)
assert ('cmdOut' not in context)
assert (stdout.read_text() == 'stdout two\nstdout three\n')
assert (stderr.read_text() == 'stderr two\nstderr three\n') |
def test_many_generalizers():
gg = _make_composite_generalizer(cirq_to_bloqs, ignore_cliffords, ignore_alloc_free, ignore_split_join, generalize_cvs, generalize_rotation_angle)
bloqs = [gg(b) for b in _BLOQS_TO_FILTER]
bloqs = [b for b in bloqs if (b is not None)]
assert (bloqs == [And(CV, CV), MultiAnd(((CV,) * 4)), Rx(PHI)]) |
class SobelOperator(nn.Module):
def __init__(self, epsilon):
super().__init__()
self.epsilon = epsilon
x_kernel = (np.array([[1, 0, (- 1)], [2, 0, (- 2)], [1, 0, (- 1)]]) / 4)
self.conv_x = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv_x.weight.data = torch.tensor(x_kernel).unsqueeze(0).unsqueeze(0).float().cuda()
self.conv_x.weight.requires_grad = False
y_kernel = (np.array([[1, 2, 1], [0, 0, 0], [(- 1), (- 2), (- 1)]]) / 4)
self.conv_y = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv_y.weight.data = torch.tensor(y_kernel).unsqueeze(0).unsqueeze(0).float().cuda()
self.conv_y.weight.requires_grad = False
def forward(self, x):
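        # Fold channels into the batch dim, smooth with 3x3 average pooling, then take the Sobel gradient magnitude.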
(b, c, h, w) = x.shape
if (c > 1):
x = x.view((b * c), 1, h, w)
x = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
grad_x = self.conv_x(x)
grad_y = self.conv_y(x)
x = torch.sqrt((((grad_x ** 2) + (grad_y ** 2)) + self.epsilon))
x = x.view(b, c, h, w)
return x |
def load_test_model(opt, dummy_opt, model_path=None):
if (model_path is None):
model_path = opt.models[0]
checkpoint = torch.load(model_path, map_location=(lambda storage, loc: storage))
fields = load_fields_from_vocab(checkpoint['vocab'])
model_opt = checkpoint['opt']
for arg in dummy_opt:
if (arg not in model_opt):
model_opt.__dict__[arg] = dummy_opt[arg]
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
model.eval()
model.generator.eval()
return (fields, model) |
def cal_group_auc(labels, preds, impression_id_list):
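    # Group AUC (GAUC): AUC per impression group, weighted by group size; groups whose labels are all identical are skipped.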
if (len(impression_id_list) != len(labels)):
        raise ValueError('impression id num should equal the sample num; impression id num is {0}'.format(len(impression_id_list)))
group_score = defaultdict((lambda : []))
group_truth = defaultdict((lambda : []))
for (idx, truth) in enumerate(labels):
user_id = impression_id_list[idx]
score = preds[idx]
truth = labels[idx]
group_score[user_id].append(score)
group_truth[user_id].append(truth)
group_flag = defaultdict((lambda : False))
for user_id in set(impression_id_list):
truths = group_truth[user_id]
flag = False
for i in range((len(truths) - 1)):
if (truths[i] != truths[(i + 1)]):
flag = True
break
group_flag[user_id] = flag
impression_total = 0
total_auc = 0
for user_id in group_flag:
if group_flag[user_id]:
auc = roc_auc_score(np.asarray(group_truth[user_id]), np.asarray(group_score[user_id]))
total_auc += (auc * len(group_truth[user_id]))
impression_total += len(group_truth[user_id])
group_auc = (float(total_auc) / impression_total)
group_auc = round(group_auc, 4)
return group_auc |
def main(args):
serialization_dir = args.serialization_dir
pruning_method = args.pruning_method
threshold = args.threshold
st = torch.load(os.path.join(serialization_dir, 'pytorch_model.bin'), map_location='cpu')
remaining_count = 0
encoder_count = 0
print('name'.ljust(60, ' '), 'Remaining Weights %', 'Remaining Weight')
for (name, param) in st.items():
if ('encoder' not in name):
continue
if ('mask_scores' in name):
if (pruning_method == 'topK'):
mask_ones = TopKBinarizer.apply(param, threshold).sum().item()
elif (pruning_method == 'sigmoied_threshold'):
mask_ones = ThresholdBinarizer.apply(param, threshold, True).sum().item()
elif (pruning_method == 'l0'):
(l, r) = ((- 0.1), 1.1)
s = torch.sigmoid(param)
s_bar = ((s * (r - l)) + l)
mask = s_bar.clamp(min=0.0, max=1.0)
mask_ones = (mask > 0.0).sum().item()
else:
raise ValueError('Unknown pruning method')
remaining_count += mask_ones
print(name.ljust(60, ' '), str(round(((100 * mask_ones) / param.numel()), 3)).ljust(20, ' '), str(mask_ones))
else:
encoder_count += param.numel()
if (('bias' in name) or ('LayerNorm' in name)):
remaining_count += param.numel()
print('')
print('Remaining Weights (global) %: ', ((100 * remaining_count) / encoder_count)) |
class SendMessageForm(forms.ModelForm):
class Meta():
model = Message
fields = ('body',)
labels = {'body': _('message')}
error_messages = {'body': {'required': _("can't really understand you")}}
def clean(self):
msg = self.cleaned_data.get('body', '')
if (len(msg) < 3):
raise forms.ValidationError(gettext('that message is just too short'))
super().clean() |
class Trainer(object):
def __init__(self, opt, model, optimizer=None):
self.opt = opt
self.optimizer = optimizer
(self.loss_stats, self.loss) = self._get_losses(opt)
self.model_with_loss = ModleWithLoss(model, self.loss)
def set_device(self, gpus, chunk_sizes, device):
if (len(gpus) > 1):
self.model_with_loss = DataParallel(self.model_with_loss, device_ids=gpus, chunk_sizes=chunk_sizes).to(device)
else:
self.model_with_loss = self.model_with_loss.to(device)
for state in self.optimizer.state.values():
for (k, v) in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=device, non_blocking=True)
def run_epoch(self, phase, epoch, data_loader):
model_with_loss = self.model_with_loss
if (phase == 'train'):
model_with_loss.train()
else:
if (len(self.opt.gpus) > 1):
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
opt = self.opt
results = {}
(data_time, batch_time) = (AverageMeter(), AverageMeter())
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats if ((l == 'tot') or (opt.weights[l] > 0))}
num_iters = (len(data_loader) if (opt.num_iters < 0) else opt.num_iters)
bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
end = time.time()
for (iter_id, batch) in enumerate(data_loader):
if (iter_id >= num_iters):
break
data_time.update((time.time() - end))
for k in batch:
if (k != 'meta'):
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
(output, loss, loss_stats) = model_with_loss(batch)
loss = loss.mean()
if (phase == 'train'):
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_time.update((time.time() - end))
end = time.time()
Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(epoch, iter_id, num_iters, phase=phase, total=bar.elapsed_td, eta=bar.eta_td)
for l in avg_loss_stats:
avg_loss_stats[l].update(loss_stats[l].mean().item(), batch['image'].size(0))
Bar.suffix = (Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg))
Bar.suffix = (Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) |Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time))
if (opt.print_iter > 0):
if ((iter_id % opt.print_iter) == 0):
print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
else:
bar.next()
if (opt.debug > 0):
self.debug(batch, output, iter_id, dataset=data_loader.dataset)
del output, loss, loss_stats
bar.finish()
ret = {k: v.avg for (k, v) in avg_loss_stats.items()}
ret['time'] = (bar.elapsed_td.total_seconds() / 60.0)
return (ret, results)
def _get_losses(self, opt):
loss_order = ['hm', 'wh', 'reg', 'ltrb', 'hps', 'hm_hp', 'hp_offset', 'dep', 'dim', 'rot', 'amodel_offset', 'ltrb_amodal', 'tracking', 'nuscenes_att', 'velocity']
loss_states = (['tot'] + [k for k in loss_order if (k in opt.heads)])
loss = GenericLoss(opt)
return (loss_states, loss)
def debug(self, batch, output, iter_id, dataset):
opt = self.opt
if ('pre_hm' in batch):
output.update({'pre_hm': batch['pre_hm']})
dets = generic_decode(output, K=opt.K, opt=opt)
for k in dets:
dets[k] = dets[k].detach().cpu().numpy()
dets_gt = batch['meta']['gt_det']
for i in range(1):
debugger = Debugger(opt=opt, dataset=dataset)
img = batch['image'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip((((img * dataset.std) + dataset.mean) * 255.0), 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_blend_img(img, gt, 'gt_hm')
if ('pre_img' in batch):
pre_img = batch['pre_img'][i].detach().cpu().numpy().transpose(1, 2, 0)
pre_img = np.clip((((pre_img * dataset.std) + dataset.mean) * 255), 0, 255).astype(np.uint8)
debugger.add_img(pre_img, 'pre_img_pred')
debugger.add_img(pre_img, 'pre_img_gt')
if ('pre_hm' in batch):
pre_hm = debugger.gen_colormap(batch['pre_hm'][i].detach().cpu().numpy())
debugger.add_blend_img(pre_img, pre_hm, 'pre_hm')
debugger.add_img(img, img_id='out_pred')
if ('ltrb_amodal' in opt.heads):
debugger.add_img(img, img_id='out_pred_amodal')
debugger.add_img(img, img_id='out_gt_amodal')
for k in range(len(dets['scores'][i])):
if (dets['scores'][(i, k)] > opt.vis_thresh):
debugger.add_coco_bbox((dets['bboxes'][(i, k)] * opt.down_ratio), dets['clses'][(i, k)], dets['scores'][(i, k)], img_id='out_pred')
if ('ltrb_amodal' in opt.heads):
debugger.add_coco_bbox((dets['bboxes_amodal'][(i, k)] * opt.down_ratio), dets['clses'][(i, k)], dets['scores'][(i, k)], img_id='out_pred_amodal')
if (('hps' in opt.heads) and (int(dets['clses'][(i, k)]) == 0)):
debugger.add_coco_hp((dets['hps'][(i, k)] * opt.down_ratio), img_id='out_pred')
if ('tracking' in opt.heads):
debugger.add_arrow((dets['cts'][i][k] * opt.down_ratio), (dets['tracking'][i][k] * opt.down_ratio), img_id='out_pred')
debugger.add_arrow((dets['cts'][i][k] * opt.down_ratio), (dets['tracking'][i][k] * opt.down_ratio), img_id='pre_img_pred')
debugger.add_img(img, img_id='out_gt')
for k in range(len(dets_gt['scores'][i])):
if (dets_gt['scores'][i][k] > opt.vis_thresh):
debugger.add_coco_bbox((dets_gt['bboxes'][i][k] * opt.down_ratio), dets_gt['clses'][i][k], dets_gt['scores'][i][k], img_id='out_gt')
if ('ltrb_amodal' in opt.heads):
debugger.add_coco_bbox((dets_gt['bboxes_amodal'][(i, k)] * opt.down_ratio), dets_gt['clses'][(i, k)], dets_gt['scores'][(i, k)], img_id='out_gt_amodal')
                    if (('hps' in opt.heads) and (int(dets_gt['clses'][i][k]) == 0)):
debugger.add_coco_hp((dets_gt['hps'][i][k] * opt.down_ratio), img_id='out_gt')
if ('tracking' in opt.heads):
debugger.add_arrow((dets_gt['cts'][i][k] * opt.down_ratio), (dets_gt['tracking'][i][k] * opt.down_ratio), img_id='out_gt')
debugger.add_arrow((dets_gt['cts'][i][k] * opt.down_ratio), (dets_gt['tracking'][i][k] * opt.down_ratio), img_id='pre_img_gt')
if ('hm_hp' in opt.heads):
pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy())
gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hmhp')
debugger.add_blend_img(img, gt, 'gt_hmhp')
if (('rot' in opt.heads) and ('dim' in opt.heads) and ('dep' in opt.heads)):
dets_gt = {k: dets_gt[k].cpu().numpy() for k in dets_gt}
calib = (batch['meta']['calib'].detach().numpy() if ('calib' in batch['meta']) else None)
det_pred = generic_post_process(opt, dets, batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib)
det_gt = generic_post_process(opt, dets_gt, batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib)
debugger.add_3d_detection(batch['meta']['img_path'][i], batch['meta']['flipped'][i], det_pred[i], calib[i], vis_thresh=opt.vis_thresh, img_id='add_pred')
debugger.add_3d_detection(batch['meta']['img_path'][i], batch['meta']['flipped'][i], det_gt[i], calib[i], vis_thresh=opt.vis_thresh, img_id='add_gt')
debugger.add_bird_views(det_pred[i], det_gt[i], vis_thresh=opt.vis_thresh, img_id='bird_pred_gt')
if (opt.debug == 4):
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
debugger.show_all_imgs(pause=True)
def val(self, epoch, data_loader):
return self.run_epoch('val', epoch, data_loader)
def train(self, epoch, data_loader):
return self.run_epoch('train', epoch, data_loader) |
def get_kernel_offsets(size: Union[(int, Tuple[(int, ...)])], stride: Union[(int, Tuple[(int, ...)])]=1, dilation: Union[(int, Tuple[(int, ...)])]=1, device: str='cpu') -> torch.Tensor:
size = make_ntuple(size, ndim=3)
stride = make_ntuple(stride, ndim=3)
dilation = make_ntuple(dilation, ndim=3)
offsets = [((np.arange((((- size[k]) // 2) + 1), ((size[k] // 2) + 1)) * stride[k]) * dilation[k]) for k in range(3)]
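    # Odd-sized kernels enumerate offsets x-fastest, even-sized ones z-fastest.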
if ((np.prod(size) % 2) == 1):
offsets = [[x, y, z] for z in offsets[2] for y in offsets[1] for x in offsets[0]]
else:
offsets = [[x, y, z] for x in offsets[0] for y in offsets[1] for z in offsets[2]]
offsets = torch.tensor(offsets, dtype=torch.int, device=device)
return offsets |
class WRN_40_2_WRN_40_2(nn.Module):
def __init__(self, num_classes):
super(WRN_40_2_WRN_40_2, self).__init__()
self.net1 = wrn_40_2_aux(num_classes=num_classes)
self.net2 = wrn_40_2_aux(num_classes=num_classes)
def forward(self, x, grad=True):
(logit1, ss_logits1) = self.net1(x, grad=grad)
(logit2, ss_logits2) = self.net2(x, grad=grad)
return ([logit1, logit2], [ss_logits1, ss_logits2]) |
class Z3QuantifierEliminator(QuantifierEliminator):
LOGICS = [LIA, LRA]
def __init__(self, environment, logic=None):
QuantifierEliminator.__init__(self)
self.environment = environment
self.logic = logic
self.converter = Z3Converter(environment, z3.main_ctx())
def eliminate_quantifiers(self, formula):
logic = get_logic(formula, self.environment)
if ((not (logic <= LRA)) and (not (logic <= LIA))):
            raise PysmtValueError(('Z3 quantifier elimination only supports LRA or LIA without combination. (detected logic is: %s)' % str(logic)))
simplifier = z3.Tactic('simplify')
eliminator = z3.Tactic('qe')
f = self.converter.convert(formula)
        s = simplifier(f, elim_and=True, pull_cheap_ite=True, ite_extra_rules=True).as_expr()
        res = eliminator(s).as_expr()
pysmt_res = None
try:
pysmt_res = self.converter.back(res)
except ConvertExpressionError:
if (logic <= LRA):
raise
            raise ConvertExpressionError(message=("Unable to represent expression %s in pySMT: the quantifier elimination for LIA is incomplete as it requires the modulus. You can find the Z3 expression representing the quantifier elimination as the attribute 'expression' of this exception object" % str(res)), expression=res)
return pysmt_res
def _exit(self):
pass |
def format_received_item(item_name: str, player_name: str) -> str:
special = {'Locked Power Bomb Expansion': 'Received Power Bomb Expansion from {provider_name}, but the main Power Bomb is required to use it.', 'Locked Missile Expansion': 'Received Missile Expansion from {provider_name}, but the Missile Launcher is required to use it.'}
generic = 'Received {item_name} from {provider_name}.'
return special.get(item_name, generic).format(item_name=item_name, provider_name=player_name) |
@unittest.skipIf((sensors is None), 'No PySensors module found')
class TestLMSensorsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('LMSensorsCollector', {})
self.collector = LMSensorsCollector(config, None)
def test_import(self):
self.assertTrue(LMSensorsCollector)
    @patch.object(Collector, 'publish')
def test_simple_sensor(self, publish_mock):
feature = FeatureMock('Core 0', 10)
chip = ChipMock('coretemp-isa-0000', [feature])
patch_detected_chips_iter = patch('sensors.iter_detected_chips', return_value=[chip])
patch_detected_chips_iter.start()
self.collector.collect()
patch_detected_chips_iter.stop()
self.assertPublished(publish_mock, 'coretemp-isa-0000.Core-0', 10)
    @patch.object(Collector, 'publish')
def test_empty_sensor(self, publish_mock):
feature = FeatureMock('Core 0')
chip = ChipMock('coretemp-isa-0000', [feature])
patch_detected_chips_iter = patch('sensors.iter_detected_chips', return_value=[chip])
patch_detected_chips_iter.start()
self.collector.collect()
patch_detected_chips_iter.stop()
self.assertUnpublished(publish_mock, 'coretemp-isa-0000.Core-0', None)
    @patch.object(Collector, 'publish')
def test_empty_zero_sensor(self, publish_mock):
self.collector.config['send_zero'] = True
feature = FeatureMock('Core 0')
chip = ChipMock('coretemp-isa-0000', [feature])
patch_detected_chips_iter = patch('sensors.iter_detected_chips', return_value=[chip])
patch_detected_chips_iter.start()
self.collector.collect()
patch_detected_chips_iter.stop()
self.assertPublished(publish_mock, 'coretemp-isa-0000.Core-0', 0) |
def generate_data(num_relations, num_tuples, relations_given, LAMA_path):
graph_path = 'data/pattern_data/graphs_tense/'
relations_path = glob.glob((graph_path + '*.graph'))
output_path = 'pararel/ft/data/'
if (not os.path.exists(output_path)):
os.mkdir(output_path)
random.shuffle(relations_path)
relation_path_keep = []
metadata = '_'
if (relations_given != ''):
relations_given = sorted(relations_given.split(','))
for relation_path in relations_path:
relation = relation_path.split('/')[(- 1)].split('.')[0]
if (relation in relations_given):
relation_path_keep.append(relation_path)
metadata += '_'.join(relations_given)
metadata += '-'
if (len(relation_path_keep) < num_relations):
for relation_path in relations_path:
if (relation_path not in relation_path_keep):
relation = relation_path.split('/')[(- 1)].split('.')[0]
relation_path_keep.append(relation_path)
metadata += relation
metadata += '-'
if (len(relation_path_keep) == num_relations):
break
metadata = metadata.strip('-')
output_path = (((((output_path + str(num_tuples)) + '_') + str(num_relations)) + metadata) + '/')
if (not os.path.exists(output_path)):
print('Saving data to: ', output_path)
os.mkdir(output_path)
output_path_true = (output_path + 'train_')
output_path_mlm = (output_path + 'train_mlm.txt')
f_mlm = open(output_path_mlm, 'w')
for relation_path in relation_path_keep:
with open(relation_path, 'rb') as f:
graph = pickle.load(f)
relation = relation_path.split('/')[(- 1)].split('.')[0]
f_true = open(((output_path_true + relation) + '.txt'), 'w')
data = utils.read_jsonl_file(((LAMA_path + relation) + '.jsonl'))
random.shuffle(data)
for (i, d) in enumerate(data):
random.shuffle(data)
for node in graph.nodes():
pattern = node.lm_pattern
pattern = pattern.replace('[X]', d['sub_label'])
pattern = pattern.replace('[Y]', '[MASK]')
pattern_mlm = pattern.replace('[MASK]', d['obj_label'])
f_true.write(pattern)
f_true.write('\n')
f_mlm.write(pattern_mlm)
f_mlm.write('\n')
f_true.write('\n')
if (i >= num_tuples):
break
f_true.close()
f_mlm.close()
else:
print('Data already exists') |
class SolveMatrixTimeSuite():
params = [[True, False], [((- 1.0), 1.0), (0.0, 1.0), (0.2, 1.0), (0.5, 1.0)], [100, 350, 700]]
param_names = ['is_hermitian', 'minmaxeival', 'n']
def setup(self, is_hermitian, minmaxeival, n):
seed = 123
ncols = 50
torch.manual_seed(seed)
(min_eival, max_eival) = minmaxeival
A = create_random_square_matrix(n, is_hermitian=is_hermitian, min_eival=min_eival, max_eival=max_eival, seed=seed)
self.A = LinearOperator.m(A, is_hermitian=is_hermitian)
X = torch.randn(n, ncols, dtype=A.dtype)
self.B = self.A.mm(X)
def time_matrix_AB(self, *args, **kwargs):
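        # asv timing target: solve A X = B, collecting any convergence warnings raised during the solve.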
with warnings.catch_warnings(record=True) as ws:
X = solve(self.A, self.B)
_catch_convergence_warnings(ws) |
def query_paths_args(chain_id, token_network_state, one_to_n_address, our_address) -> Dict[(str, Any)]:
return dict(our_address=our_address, privkey=PRIVKEY, current_block_number=10, token_network_address=token_network_state.address, one_to_n_address=one_to_n_address, chain_id=chain_id, route_from=our_address, route_to=factories.make_address(), value=50, pfs_wait_for_block=10) |
def test_top_down_pose_tracking_demo():
pose_model = init_pose_model('configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/res50_coco_256x192.py', None, device='cpu')
image_name = 'tests/data/coco/.jpg'
dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
person_result = [{'bbox': [50, 50, 50, 100]}]
(pose_results, _) = inference_top_down_pose_model(pose_model, image_name, person_result, format='xywh', dataset_info=dataset_info)
(pose_results, next_id) = get_track_id(pose_results, [], next_id=0)
vis_pose_tracking_result(pose_model, image_name, pose_results, dataset_info=dataset_info)
pose_results_last = pose_results
pose_model = init_pose_model('configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/aic/res50_aic_256x192.py', None, device='cpu')
image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
(pose_results, _) = inference_top_down_pose_model(pose_model, image_name, person_result, format='xywh', dataset_info=dataset_info)
(pose_results, next_id) = get_track_id(pose_results, pose_results_last, next_id)
for pose_result in pose_results:
del pose_result['bbox']
(pose_results, next_id) = get_track_id(pose_results, pose_results_last, next_id)
vis_pose_tracking_result(pose_model, image_name, pose_results, dataset_info=dataset_info)
pose_model = init_pose_model('configs/hand/2d_kpt_sview_rgb_img/topdown_heatmap/onehand10k/res50_onehand10k_256x256.py', None, device='cpu')
image_name = 'tests/data/onehand10k/9.jpg'
dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
(pose_results, _) = inference_top_down_pose_model(pose_model, image_name, [{'bbox': [10, 10, 30, 30]}], format='xywh', dataset_info=dataset_info)
(pose_results, next_id) = get_track_id(pose_results, pose_results_last, next_id)
vis_pose_tracking_result(pose_model, image_name, pose_results, dataset_info=dataset_info)
pose_model = init_pose_model('configs/hand/2d_kpt_sview_rgb_img/topdown_heatmap/interhand2d/res50_interhand2d_all_256x256.py', None, device='cpu')
image_name = 'tests/data/interhand2.6m/image2017.jpg'
dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
(pose_results, _) = inference_top_down_pose_model(pose_model, image_name, [{'bbox': [50, 50, 0, 0]}], format='xywh', dataset_info=dataset_info)
(pose_results, next_id) = get_track_id(pose_results, [], next_id=0)
vis_pose_tracking_result(pose_model, image_name, pose_results, dataset_info=dataset_info)
pose_results_last = pose_results
pose_model = init_pose_model('configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/mpii/res50_mpii_256x256.py', None, device='cpu')
image_name = 'tests/data/mpii/.jpg'
dataset_info = DatasetInfo(pose_model.cfg.data['test']['dataset_info'])
(pose_results, _) = inference_top_down_pose_model(pose_model, image_name, [{'bbox': [50, 50, 0, 0]}], format='xywh', dataset_info=dataset_info)
(pose_results, next_id) = get_track_id(pose_results, pose_results_last, next_id)
vis_pose_tracking_result(pose_model, image_name, pose_results, dataset_info=dataset_info) |
def try_finally_try(builder: IRBuilder, err_handler: BasicBlock, return_entry: BasicBlock, main_entry: BasicBlock, try_body: GenFunc) -> ((Register | AssignmentTarget) | None):
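    # Emit the try body with an error handler and return-interception pushed, so both exits route through the finally blocks.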
control = TryFinallyNonlocalControl(return_entry)
builder.builder.push_error_handler(err_handler)
builder.nonlocal_control.append(control)
builder.goto_and_activate(BasicBlock())
try_body()
builder.goto(main_entry)
builder.nonlocal_control.pop()
builder.builder.pop_error_handler()
return control.ret_reg |
class Effect173(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, container, context, projectionRange, **kwargs):
level = (container.level if ('skill' in context) else 1)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Small Hybrid Turret')), 'damageMultiplier', (container.getModifiedItemAttr('damageMultiplierBonus') * level), **kwargs) |
class VGG16(Network):
alpha = [0, 0, 0, 1, 1]
beta = [1, 1, 1, 1, 1]
def setup(self):
self.conv(3, 3, 3, 64, name='conv1_1').conv(3, 3, 64, 64, name='conv1_2').pool().conv(3, 3, 64, 128, name='conv2_1').conv(3, 3, 128, 128, name='conv2_2').pool().conv(3, 3, 128, 256, name='conv3_1').conv(3, 3, 256, 256, name='conv3_2').conv(3, 3, 256, 256, name='conv3_3').pool().conv(3, 3, 256, 512, name='conv4_1').conv(3, 3, 512, 512, name='conv4_2').conv(3, 3, 512, 512, name='conv4_3').pool().conv(3, 3, 512, 512, name='conv5_1').conv(3, 3, 512, 512, name='conv5_2').conv(3, 3, 512, 512, name='conv5_3').pool()
def y(self):
return [self.vardict['conv1_2'], self.vardict['conv2_2'], self.vardict['conv3_3'], self.vardict['conv4_3'], self.vardict['conv5_3']] |
class TestNonNegSqrt():
def test_main(self):
vals = ((- 1.0), 0.0, 1.0, 2.0)
desireds = (0.0, 0.0, 1.0, sqrt(2.0))
for (val, desired) in zip(vals, desireds):
x = torch.tensor(val)
y = pystiche.nonnegsqrt(x)
assert (y == ptu.approx(desired))
def test_grad(self):
vals = ((- 1.0), 0.0, 1.0, 2.0)
desireds = (0.0, 0.0, (1.0 / 2.0), (1.0 / (2.0 * sqrt(2.0))))
for (val, desired) in zip(vals, desireds):
x = torch.tensor(val, requires_grad=True)
y = pystiche.nonnegsqrt(x)
y.backward()
assert (x.grad == ptu.approx(desired)) |
def main(client, config):
(date_dim_df, customer_df, s_sales_df, web_sales_df) = benchmark(read_tables, config=config, compute_result=config['get_read_time'])
    filtered_date_df = date_dim_df.query('d_year >= @q13_Year and d_year <= @q13_Year_plus', local_dict={'q13_Year': q13_Year, 'q13_Year_plus': (q13_Year + 1)}, meta=date_dim_df._meta).reset_index(drop=True)
s_sales_df = s_sales_df.merge(filtered_date_df, how='inner', left_on='ss_sold_date_sk', right_on='d_date_sk')
web_sales_df = web_sales_df.merge(filtered_date_df, how='inner', left_on='ws_sold_date_sk', right_on='d_date_sk')
s_grouped_df = s_sales_df.groupby(by=['ss_customer_sk', 'd_year']).agg({'ss_net_paid': 'sum'}).reset_index().rename(columns={'ss_net_paid': 'year_total'})
sales_ratio_df = s_grouped_df.map_partitions(get_sales_ratio)
sales_ratio_df = sales_ratio_df.groupby(by='ss_customer_sk').agg({'first_year_sales': 'max', 'second_year_sales': 'max'}).reset_index()
sales_ratio_df = sales_ratio_df.query('first_year_sales>0')
sales_ratio_df['storeSalesIncreaseRatio'] = (sales_ratio_df['second_year_sales'] / sales_ratio_df['first_year_sales'])
sales_ratio_df = sales_ratio_df.drop(['first_year_sales', 'second_year_sales'], axis=1).rename(columns={'ss_customer_sk': 'c_customer_sk'})
web_grouped_df = web_sales_df.groupby(by=['ws_bill_customer_sk', 'd_year']).agg({'ws_net_paid': 'sum'}).reset_index().rename(columns={'ws_net_paid': 'year_total'})
web_ratio_df = web_grouped_df.map_partitions(get_sales_ratio)
web_ratio_df = web_ratio_df.groupby(by='ws_bill_customer_sk').agg({'first_year_sales': 'max', 'second_year_sales': 'max'}).reset_index()
web_ratio_df = web_ratio_df.query('first_year_sales>0')
web_ratio_df['webSalesIncreaseRatio'] = (web_ratio_df['second_year_sales'] / web_ratio_df['first_year_sales'])
web_ratio_df = web_ratio_df.drop(['first_year_sales', 'second_year_sales'], axis=1).rename(columns={'ws_bill_customer_sk': 'c_customer_sk'})
both_sales = sales_ratio_df.merge(web_ratio_df, how='inner', on='c_customer_sk')
customer_df['c_customer_sk'] = customer_df['c_customer_sk'].astype('int64')
both_sales['c_customer_sk'] = both_sales['c_customer_sk'].astype('int64')
final_df = customer_df.merge(both_sales, how='inner', on='c_customer_sk').query('webSalesIncreaseRatio > storeSalesIncreaseRatio')
final_df = final_df.drop('c_customer_id', axis=1)
result_df = final_df.repartition(npartitions=1).persist()
wait(result_df)
result_df = result_df.map_partitions((lambda df: df.sort_values(['webSalesIncreaseRatio', 'c_customer_sk', 'c_first_name', 'c_last_name'], ascending=[False, True, True, True])))
result_df = result_df.reset_index(drop=True)
result_df = result_df.head(q13_limit)
return result_df |
class MySimulatorMaster(SimulatorMaster, Callback):
def __init__(self, pipe_c2s, pipe_s2c, gpus):
super(MySimulatorMaster, self).__init__(pipe_c2s, pipe_s2c)
self.queue = queue.Queue(maxsize=((BATCH_SIZE * 8) * 2))
self._gpus = gpus
def _setup_graph(self):
nr_gpu = len(self._gpus)
predictors = [self.trainer.get_predictor(['role_id', 'policy_state_in', 'value_state_in', 'last_cards_in', 'minor_type_in', 'mode_in'], ['passive_decision_prob', 'passive_bomb_prob', 'passive_response_prob', 'active_decision_prob', 'active_response_prob', 'active_seq_prob', 'minor_response_prob', 'mode_out'], self._gpus[(k % nr_gpu)]) for k in range(PREDICTOR_THREAD)]
self.async_predictor = MultiThreadAsyncPredictor(predictors, batch_size=PREDICT_BATCH_SIZE)
def _before_train(self):
self.async_predictor.start()
def _on_state(self, role_id, prob_state, all_state, last_cards_onehot, mask, minor_type, mode, first_st, client):
def cb(outputs):
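            # Predictor callback: pick the output head selected by mode and sample an action from the masked distribution.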
try:
output = outputs.result()
except CancelledError:
logger.info('Client {} cancelled.'.format(client.ident))
return
mode = output[(- 1)]
distrib = ((output[:(- 1)][mode] + 1e-06) * mask)
assert np.all(np.isfinite(distrib)), distrib
action = np.random.choice(len(distrib), p=(distrib / distrib.sum()))
client.memory[(role_id - 1)].append(TransitionExperience(prob_state, all_state, action, reward=0, minor_type=minor_type, first_st=first_st, last_cards_onehot=last_cards_onehot, mode=mode, prob=distrib[action]))
self.send_queue.put([client.ident, dumps(action)])
self.async_predictor.put_task([role_id, prob_state, all_state, last_cards_onehot, minor_type, mode], cb)
def _process_msg(self, client, role_id, prob_state, all_state, last_cards_onehot, first_st, mask, minor_type, mode, reward, isOver):
if (isOver and first_st):
assert (reward != 0)
for i in range(3):
if (i != 1):
continue
j = (- 1)
while (client.memory[i][j].reward == 0):
client.memory[i][j].reward = (reward if (i != 1) else (- reward))
if client.memory[i][j].first_st:
break
j -= 1
self._parse_memory(0, client)
self._on_state(role_id, prob_state, all_state, last_cards_onehot, mask, minor_type, mode, first_st, client)
def _parse_memory(self, init_r, client):
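        # Walk the client's buffer backwards, accumulating discounted returns over first-state transitions; only the landlord (role 2) is trained here.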
for role_id in range(1, 4):
if (role_id != 2):
continue
mem = client.memory[(role_id - 1)]
mem.reverse()
R = float(init_r)
mem_valid = [m for m in mem if m.first_st]
dr = []
for (idx, k) in enumerate(mem_valid):
R = (k.reward + (GAMMA * R))
dr.append(R)
dr.reverse()
mem.reverse()
i = (- 1)
j = 0
while (j < len(mem)):
if mem[j].first_st:
i += 1
target = [0 for _ in range(7)]
k = mem[j]
target[k.mode] = k.action
self.queue.put([role_id, k.prob_state, k.all_state, k.last_cards_onehot, *target, k.minor_type, k.mode, k.prob, dr[i]])
j += 1
client.memory[(role_id - 1)] = [] |
class STVQAANLSEvaluator():
def __init__(self):
import editdistance
self.get_edit_distance = editdistance.eval
def get_anls(self, s1, s2):
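        # ANLS: 1 - normalized edit distance, zeroed below the 0.5 similarity threshold.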
s1 = s1.lower().strip()
s2 = s2.lower().strip()
iou = (1 - (self.get_edit_distance(s1, s2) / max(len(s1), len(s2))))
anls = (iou if (iou >= 0.5) else 0.0)
return anls
def eval_pred_list(self, pred_list):
pred_scores = []
for entry in pred_list:
anls = max((self.get_anls(entry['pred_answer'], gt) for gt in entry['gt_answers']))
pred_scores.append(anls)
accuracy = (sum(pred_scores) / len(pred_scores))
return accuracy |
def replace_rvs_by_values(graphs: Sequence[TensorVariable], *, rvs_to_values: Dict[(TensorVariable, TensorVariable)], rvs_to_transforms: Optional[Dict[(TensorVariable, 'Transform')]]=None) -> List[TensorVariable]:
if rvs_to_transforms:
inputs = [i for i in graph_inputs(graphs) if (not isinstance(i, Constant))]
equiv = clone_get_equiv(inputs, graphs, False, False)
graphs = [equiv[g] for g in graphs]
rvs_to_values = {equiv.get(rv, rv): value for (rv, value) in rvs_to_values.items()}
rvs_to_transforms = {equiv.get(rv, rv): transform for (rv, transform) in rvs_to_transforms.items()}
replacements = {}
def populate_replacements(var):
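        # Walker callback: swap each known rv for its (optionally back-transformed) value variable, then keep walking the inputs.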
if (not var.owner):
return []
next_vars = []
value = rvs_to_values.get(var, None)
if (value is not None):
rv = var
if (rvs_to_transforms is not None):
transform = rvs_to_transforms.get(rv, None)
if (transform is not None):
value = transform.backward(value, *rv.owner.inputs)
value = rv.type.filter_variable(value, allow_convert=True)
value.name = rv.name
replacements[rv] = value
next_vars.append(value)
next_vars.extend(reversed(var.owner.inputs))
return next_vars
for _ in walk(graphs, populate_replacements, bfs=False):
pass
return replace_vars_in_graphs(graphs, replacements) |
class ForceBalanceFitting(StageBase):
class Config():
validate_assignment = True
arbitrary_types_allowed = True
type: Literal['ForceBalanceFitting'] = 'ForceBalanceFitting'
penalty_type: Literal[('L1', 'L2')] = 'L1'
job_type: str = 'optimize'
max_iterations: PositiveInt = 10
convergence_step_criteria: PositiveFloat = 0.01
convergence_objective_criteria: PositiveFloat = 0.01
convergence_gradient_criteria: PositiveFloat = 0.01
n_criteria: PositiveInt = 1
eig_lowerbound: PositiveFloat = 0.01
finite_difference_h: PositiveFloat = 0.01
penalty_additive: PositiveFloat = 0.1
constrain_charge: bool = False
initial_trust_radius: float = (- 0.25)
minimum_trust_radius: float = 0.05
error_tolerance: PositiveFloat = 1.0
adaptive_factor: PositiveFloat = 0.2
adaptive_damping: PositiveFloat = 1.0
normalize_weights: bool = False
extras: Dict[(str, Any)] = {}
priors: Priors = Priors()
targets: Dict[(str, TorsionProfileSmirnoff)] = {'TorsionProfile_OpenFF': TorsionProfileSmirnoff()}
def start_message(self, **kwargs) -> str:
return 'Performing torsion optimisations using ForceBalance.'
def finish_message(self, **kwargs) -> str:
return 'Torsion optimisation complete.'
    @classmethod
    def is_available(cls) -> bool:
fb = which_import('forcebalance', return_bool=True, raise_error=True, raise_msg='Please install ForceBalance via `conda install forcebalance -c conda-forge`.')
openmm = which_import('openmm', return_bool=True, raise_error=True, raise_msg='Please install openmm via `conda install openmm -c conda-forge`.')
cctools = which_import('work_queue', return_bool=True, raise_error=True, raise_msg='Please install cctools via `conda install ndcctools -c conda-forge`.')
return (fb and openmm and cctools)
def run(self, molecule: 'Ligand', *args, **kwargs) -> 'Ligand':
return self._run(molecule, *args, **kwargs)
def _run(self, molecule: 'Ligand', **kwargs) -> 'Ligand':
if (not self.targets):
raise ForceBalanceError('No fitting targets have been set for forcebalance, please set at least one target.')
return self._optimise(molecule=molecule, local_options=kwargs.get('local_options', None))
def add_target(self, target: TargetBase) -> None:
if issubclass(type(target), TargetBase):
self.targets[target.target_name] = target
def _optimise(self, molecule: Ligand, local_options=None) -> Ligand:
if (local_options is None):
local_options = LocalResource(cores=1, memory=2)
with forcebalance_setup(folder_name='ForceBalance'):
fitting_folder = os.getcwd()
fitting_targets = {}
os.chdir('targets')
molecules_to_optimise = []
if (molecule.qm_scans is not None):
molecules_to_optimise.append(molecule)
if (molecule.fragments is not None):
molecules_to_optimise.extend(molecule.fragments)
for target in self.targets.values():
for mol in molecules_to_optimise:
target_folders = target.prep_for_fitting(molecule=mol)
if (target.target_name in fitting_targets):
fitting_targets[target.target_name].extend(target_folders)
else:
fitting_targets[target.target_name] = target_folders
os.chdir(fitting_folder)
total_targets = sum((len(torsions) for torsions in fitting_targets.values()))
max_workers = min([total_targets, local_options.cores])
use_workers = (True if (max_workers > 1) else False)
wq_port = self.generate_optimise_in(target_data=fitting_targets, use_workers=use_workers)
self.generate_forcefield(molecule=molecule)
with open('log.txt', 'w') as log:
if use_workers:
import work_queue
workers = work_queue.Factory('local', manager_host_port=f'localhost:{wq_port}')
workers.cores = 1
workers.memory = (local_options.memory / max_workers)
workers.min_workers = 1
workers.max_workers = max_workers
with workers:
subprocess.run('ForceBalance optimize.in', shell=True, stdout=log, stderr=log)
else:
subprocess.run('ForceBalance optimize.in', shell=True, stdout=log, stderr=log)
result_ligand = self.collect_results(molecule=molecule)
return result_ligand
def generate_forcefield(self, molecule: Ligand) -> None:
molecule._optimizeable_offxml(file_name=os.path.join('forcefield', 'bespoke.offxml'), h_constraints=False)
def generate_optimise_in(self, target_data: Dict[(str, List[str])], use_workers: bool) -> Optional[int]:
from jinja2 import Template
template_file = get_data(os.path.join('templates', 'optimize.txt'))
with open(template_file) as file:
template = Template(file.read())
data = self.dict(exclude={'targets', 'priors'})
data['priors'] = self.priors.format_priors()
data['fitting_targets'] = target_data
target_options = {}
for target in self.targets.values():
target_options[target.target_name] = target.fb_options()
data['target_options'] = target_options
wq_port = None
if use_workers:
data['asynchronous'] = True
sock = socket.socket()
sock.bind(('localhost', 0))
wq_port = sock.getsockname()[1]
sock.close()
data['wq_port'] = wq_port
rendered_template = template.render(**data)
with open('optimize.in', 'w') as opt_in:
opt_in.write(rendered_template)
return wq_port
def check_converged(self) -> bool:
converged = False
with open('optimize.out') as log:
for line in log.readlines():
if ('optimization converged' in line.lower()):
converged = True
break
elif ('convergence failure' in line.lower()):
converged = False
break
return converged
def collect_results(self, molecule: Ligand) -> Ligand:
status = self.check_converged()
if (not status):
raise ForceBalanceError(f'The optimisation for molecule {molecule.name} did not converge so the parameters could not be updated.')
else:
from openff.toolkit.typing.engines.smirnoff import ForceField
from openmm import unit
optimized_ff = ForceField(os.path.join('result', self.job_type, 'bespoke.offxml'), load_plugins=True, allow_cosmetic_attributes=True)
torsion_handler = optimized_ff.get_parameter_handler('ProperTorsions')
for parameter in torsion_handler.parameters:
if parameter.attribute_is_cosmetic('parameterize'):
parameter_data = {'k1': parameter.k1.value_in_unit(unit.kilojoule_per_mole), 'k2': parameter.k2.value_in_unit(unit.kilojoule_per_mole), 'k3': parameter.k3.value_in_unit(unit.kilojoule_per_mole), 'k4': parameter.k4.value_in_unit(unit.kilojoule_per_mole), 'periodicity1': parameter.periodicity1, 'periodicity2': parameter.periodicity2, 'periodicity3': parameter.periodicity3, 'periodicity4': parameter.periodicity4, 'phase1': parameter.phase1.value_in_unit(unit.radian), 'phase2': parameter.phase2.value_in_unit(unit.radian), 'phase3': parameter.phase3.value_in_unit(unit.radian), 'phase4': parameter.phase4.value_in_unit(unit.radian)}
matches = molecule.get_smarts_matches(parameter.smirks)
for match in matches:
molecule.TorsionForce[match].update(**parameter_data)
return molecule |
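# Minimal standalone sketch of the free-port trick used by generate_optimise_in
# above: binding a socket to port 0 asks the OS for any currently free port,
# which is then handed to the work_queue manager. Stdlib only.
import socket

def find_free_port() -> int:
    sock = socket.socket()
    sock.bind(('localhost', 0))
    port = sock.getsockname()[1]  # OS-assigned free port
    sock.close()
    return port

print(find_free_port()) |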
class RewriteDatabaseQuery():
def __init__(self, include: Iterable[Union[(str, None)]], require: Optional[Union[(OrderedSet, Sequence[str])]]=None, exclude: Optional[Union[(OrderedSet, Sequence[str])]]=None, subquery: Optional[dict[(str, 'RewriteDatabaseQuery')]]=None, position_cutoff: float=math.inf, extra_rewrites: Optional[Sequence[tuple[(Union[('RewriteDatabaseQuery', RewritesType)], Union[(int, float)])]]]=None):
self.include = OrderedSet(include)
self.require = (OrderedSet(require) if require else OrderedSet())
self.exclude = (OrderedSet(exclude) if exclude else OrderedSet())
self.subquery = (subquery or {})
self.position_cutoff = position_cutoff
self.name: Optional[str] = None
if (extra_rewrites is None):
extra_rewrites = []
self.extra_rewrites = list(extra_rewrites)
def __str__(self):
return f'RewriteDatabaseQuery(inc={self.include},ex={self.exclude},require={self.require},subquery={self.subquery},position_cutoff={self.position_cutoff},extra_rewrites={self.extra_rewrites})'
def __setstate__(self, state):
self.__dict__.update(state)
if (not hasattr(self, 'extra_rewrites')):
self.extra_rewrites = []
def including(self, *tags: str) -> 'RewriteDatabaseQuery':
return RewriteDatabaseQuery(self.include.union(tags), self.require, self.exclude, self.subquery, self.position_cutoff, self.extra_rewrites)
def excluding(self, *tags: str) -> 'RewriteDatabaseQuery':
return RewriteDatabaseQuery(self.include, self.require, self.exclude.union(tags), self.subquery, self.position_cutoff, self.extra_rewrites)
def requiring(self, *tags: str) -> 'RewriteDatabaseQuery':
return RewriteDatabaseQuery(self.include, self.require.union(tags), self.exclude, self.subquery, self.position_cutoff, self.extra_rewrites)
def register(self, *rewrites: tuple[('RewriteDatabaseQuery', Union[(int, float)])]) -> 'RewriteDatabaseQuery':
return RewriteDatabaseQuery(self.include, self.require, self.exclude, self.subquery, self.position_cutoff, (self.extra_rewrites + list(rewrites))) |
def dtypes():
return [dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'), dtype('O'), dtype('float64'), dtype('float64'), dtype('float64'), dtype('float64'), dtype('float64'), dtype('int64'), dtype('float64'), dtype('O'), dtype('int64'), dtype('float64'), dtype('int64'), dtype('float64'), dtype('float64'), dtype('int64'), dtype('int64'), dtype('float64'), dtype('int64')] |
def test_stats():
with rasterio.open('tests/data/RGB.byte.tif') as src:
results = stats((src, 1))
assert (results[0] == 0)
assert (results[1] == 255)
assert np.isclose(results[2], 29.9477)
results2 = stats(src.read(1))
assert np.allclose(np.array(results), np.array(results2)) |
def test_maneuver_reader(tmpdir):
tmpcatalog = os.path.join(tmpdir, 'my_catalog.xosc')
cf = xosc.CatalogFile()
cf.create_catalog(tmpcatalog, 'ManeuverCatalog', 'My first maneuver catalog', 'Mandolin')
event = xosc.Event('my_event', xosc.Priority.overwrite)
event.add_action('myaction', xosc.AbsoluteSpeedAction(19, xosc.TransitionDynamics(xosc.DynamicsShapes.linear, xosc.DynamicsDimension.rate, 3)))
event.add_trigger(xosc.EntityTrigger('my_trigger', 3, xosc.ConditionEdge.none, xosc.SpeedCondition(10, xosc.Rule.lessThan), 'ego'))
orig = xosc.Maneuver('my_maneuver')
orig.add_event(event)
cf.add_to_catalog(orig)
cf.dump()
read = xosc.CatalogReader(xosc.CatalogReference('my_catalog', 'my_maneuver'), tmpdir)
assert (read == orig) |
def f1_score(y_pred, y_true, average='micro'):
assert (len(y_pred) == len(y_true))
def _compute_prf(gold, pred):
(TP, FP, FN) = (0, 0, 0)
if (len(gold) != 0):
count = 1
for g in gold:
if (g in pred):
TP += 1
else:
FN += 1
for p in set(pred):
if (p not in gold):
FP += 1
precision = ((TP / float((TP + FP))) if ((TP + FP) != 0) else 0)
recall = ((TP / float((TP + FN))) if ((TP + FN) != 0) else 0)
F1 = ((((2 * precision) * recall) / float((precision + recall))) if ((precision + recall) != 0) else 0)
else:
(precision, recall, F1, count) = (0, 0, 0, 0)
return (TP, FP, FN, F1, count)
def _compute_F1(precision, recall):
F1 = ((((2 * precision) * recall) / float((precision + recall))) if ((precision + recall) != 0) else 0)
return F1
(F1_pred, F1_count, TP_all, FP_all, FN_all) = (0, 0, 0, 0, 0)
for (y_true_item, y_pred_item) in zip(y_true, y_pred):
(single_tp, single_fp, single_fn, single_f1, count) = _compute_prf(y_true_item, y_pred_item)
F1_pred += single_f1
F1_count += count
TP_all += single_tp
FP_all += single_fp
FN_all += single_fn
if (average == 'macro'):
F1_macro_score = (F1_pred / float(F1_count))
return F1_macro_score
elif (average == 'micro'):
P_score = ((TP_all / float((TP_all + FP_all))) if ((TP_all + FP_all) != 0) else 0)
R_score = ((TP_all / float((TP_all + FN_all))) if ((TP_all + FN_all) != 0) else 0)
F1_micro_score = _compute_F1(P_score, R_score)
return F1_micro_score
else:
raise ValueError('Options other than micro/macro are not supported.') |
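# Quick sanity check for f1_score above (assumes it is in scope). y_true and
# y_pred are per-example label collections, as the function expects.
y_true = [['a', 'b'], ['c']]
y_pred = [['a'], ['c', 'd']]
print(f1_score(y_pred, y_true, average='micro'))  # TP=2, FP=1, FN=1 -> 2/3
print(f1_score(y_pred, y_true, average='macro'))  # mean of per-example F1s -> 2/3 |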
def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if (timer.seconds() > 1):
logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
id_map = None
if (dataset_name is not None):
meta = MetadataCatalog.get(dataset_name)
cat_ids = sorted(coco_api.getCatIds())
cats = coco_api.loadCats(cat_ids)
thing_classes = [c['name'] for c in sorted(cats, key=(lambda x: x['id']))]
meta.thing_classes = thing_classes
if (not ((min(cat_ids) == 1) and (max(cat_ids) == len(cat_ids)))):
if ('coco' not in dataset_name):
logger.warning("\nCategory ids in annotations are not in [1, #categories]! We'll apply a mapping for you.\n")
id_map = {v: i for (i, v) in enumerate(cat_ids)}
meta.thing_dataset_id_to_contiguous_id = id_map
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
total_num_valid_anns = sum([len(x) for x in anns])
total_num_anns = len(coco_api.anns)
if (total_num_valid_anns < total_num_anns):
logger.warning(f'{json_file} contains {total_num_anns} annotations, but only {total_num_valid_anns} of them match to images in the file.')
if ('minival' not in json_file):
ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique!".format(json_file)
imgs_anns = list(zip(imgs, anns))
logger.info('Loaded {} images in COCO format from {}'.format(len(imgs_anns), json_file))
dataset_dicts = []
ann_keys = (['iscrowd', 'bbox', 'keypoints', 'category_id'] + (extra_annotation_keys or []))
num_instances_without_valid_segmentation = 0
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
record['file_name'] = os.path.join(image_root, img_dict['file_name'])
record['height'] = img_dict['height']
record['width'] = img_dict['width']
image_id = record['image_id'] = img_dict['id']
record['ignore'] = (img_dict['ignore'] if ('ignore' in img_dict) else 0)
record['video_id'] = (img_dict['video_id'] if ('video_id' in img_dict) else (- 1))
record['index'] = (img_dict['frame_id'] if ('frame_id' in img_dict) else (- 1))
objs = []
for anno in anno_dict_list:
assert (anno['image_id'] == image_id)
if (anno.get('ignore', 0) != 0):
continue
obj = {key: anno[key] for key in ann_keys if (key in anno)}
segm = anno.get('segmentation', None)
if segm:
if isinstance(segm, dict):
if isinstance(segm['counts'], list):
segm = mask_util.frPyObjects(segm, *segm['size'])
else:
segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
if (len(segm) == 0):
num_instances_without_valid_segmentation += 1
continue
obj['segmentation'] = segm
keypts = anno.get('keypoints', None)
if keypts:
for (idx, v) in enumerate(keypts):
if ((idx % 3) != 2):
keypts[idx] = (v + 0.5)
obj['keypoints'] = keypts
obj['bbox_mode'] = BoxMode.XYWH_ABS
if id_map:
obj['category_id'] = id_map[obj['category_id']]
objs.append(obj)
record['annotations'] = objs
dataset_dicts.append(record)
if (num_instances_without_valid_segmentation > 0):
logger.warning(('Filtered out {} instances without valid segmentation. '.format(num_instances_without_valid_segmentation) + 'There might be issues in your dataset generation process. A valid polygon should be a list[float] with even length >= 6.'))
return dataset_dicts |
def BackupRestoreSeries(source_local, dest_local, list_of_dirnames, compare_hardlinks=1, dest_dirname=abs_output_dir, restore_dirname=abs_restore_dir, compare_backups=1, compare_eas=0, compare_acls=0, compare_ownership=0):
Globals.set('preserve_hardlinks', compare_hardlinks)
Globals.set('no_compression_regexp_string', os.fsencode(actions.DEFAULT_NOT_COMPRESSED_REGEXP))
time = 10000
dest_rp = rpath.RPath(Globals.local_connection, dest_dirname)
restore_rp = rpath.RPath(Globals.local_connection, restore_dirname)
Myrm(dest_dirname)
for dirname in list_of_dirnames:
src_rp = rpath.RPath(Globals.local_connection, dirname)
reset_hardlink_dicts()
_reset_connections(src_rp, dest_rp)
InternalBackup(source_local, dest_local, dirname, dest_dirname, time, eas=compare_eas, acls=compare_acls)
time += 10000
_reset_connections(src_rp, dest_rp)
if compare_backups:
assert compare_recursive(src_rp, dest_rp, compare_hardlinks, compare_eas=compare_eas, compare_acls=compare_acls, compare_ownership=compare_ownership)
time = 10000
for dirname in list_of_dirnames[:(- 1)]:
reset_hardlink_dicts()
Myrm(restore_dirname)
InternalRestore(dest_local, source_local, dest_dirname, restore_dirname, time, eas=compare_eas, acls=compare_acls)
src_rp = rpath.RPath(Globals.local_connection, dirname)
assert compare_recursive(src_rp, restore_rp, compare_eas=compare_eas, compare_acls=compare_acls, compare_ownership=compare_ownership)
if (time == 20000):
time = 21000
time += 10000 |
class Gradients(uhf_grad.Gradients):
_keys = {'with_df', 'auxbasis_response'}
def __init__(self, mf):
self.auxbasis_response = True
uhf_grad.Gradients.__init__(self, mf)
get_jk = df_rhf_grad.Gradients.get_jk
get_j = df_rhf_grad.Gradients.get_j
get_k = df_rhf_grad.Gradients.get_k
def get_veff(self, mol=None, dm=None):
(vj, vk) = self.get_jk(mol, dm)
vhf = ((vj[0] + vj[1]) - vk)
if self.auxbasis_response:
e1_aux = vj.aux.sum((0, 1))
e1_aux -= numpy.trace(vk.aux, axis1=0, axis2=1)
logger.debug1(self, 'sum(auxbasis response) %s', e1_aux.sum(axis=0))
vhf = lib.tag_array(vhf, aux=e1_aux)
return vhf
def extra_force(self, atom_id, envs):
if self.auxbasis_response:
return envs['vhf'].aux[atom_id]
else:
return 0 |
def reshape_to_matrix(input_tensor):
ndims = input_tensor.shape.ndims
if (ndims < 2):
raise ValueError(('Input tensor must have at least rank 2. Shape = %s' % input_tensor.shape))
if (ndims == 2):
return input_tensor
width = input_tensor.shape[(- 1)]
output_tensor = tf.reshape(input_tensor, [(- 1), width])
return output_tensor |
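# Small demo of reshape_to_matrix above (assumes TensorFlow is available):
# any tensor of rank >= 2 is flattened to 2-D, keeping the last axis as width.
import tensorflow as tf

t = tf.zeros([2, 3, 4])
m = reshape_to_matrix(t)
print(m.shape)  # (6, 4): leading axes collapsed, last axis preserved |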
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if (data is None):
return True
if (isinstance(data, tuple) and (data == ())):
return True
if isinstance(data, (str, unicode, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:str', data)
def represent_bool(self, data):
if data:
value = u'true'
else:
value = u'false'
return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
def represent_long(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
inf_value = 1e+300
while (repr(inf_value) != repr((inf_value * inf_value))):
inf_value *= inf_value
def represent_float(self, data):
# (data != data) catches NaN; the second clause guards platforms where NaN compares equal to everything.
if ((data != data) or ((data == 0.0) and (data == 1.0))):
value = u'.nan'
elif (data == self.inf_value):
value = u'.inf'
elif (data == (- self.inf_value)):
value = u'-.inf'
else:
value = unicode(repr(data)).lower()
if ((u'.' not in value) and (u'e' in value)):
value = value.replace(u'e', u'.0e', 1)
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
def represent_list(self, data):
return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
def represent_dict(self, data):
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping(u'tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = unicode(data.isoformat())
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = unicode(data.isoformat(' '))
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError(('cannot represent an object: %s' % data)) |
def fork(fork_inst: Type[T]=StateHolder, name: Optional[str]=None) -> Type[T]:
fork_inst._fork_counter += 1
if name:
class_name = name
else:
class_name = '{}_fork{}'.format(get_class_name(fork_inst), fork_inst._fork_counter)
result = type(class_name, (fork_inst,), {})
result._classes_serializers = fork_inst._classes_serializers.copy()
result._classes_deserializers = fork_inst._classes_deserializers.copy()
result._serializers = fork_inst._serializers.copy()
result._deserializers = fork_inst._deserializers.copy()
result._fork_counter = 0
result._suppress_warnings = fork_inst._suppress_warnings
result._suppressed_warnings = fork_inst._suppressed_warnings.copy()
return result |
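# Usage sketch for fork() above with a hypothetical minimal StateHolder that
# carries only the attributes fork() copies; the real StateHolder comes from
# the host (de)serialization library.
class StateHolder:
    _fork_counter = 0
    _classes_serializers = {}
    _classes_deserializers = {}
    _serializers = {}
    _deserializers = {}
    _suppress_warnings = False
    _suppressed_warnings = set()

child = fork(StateHolder, name='MyFork')
print(child.__name__)  # 'MyFork'
print(child._serializers is StateHolder._serializers)  # False: copied, not shared |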
class InhibitAnyPolicy(ExtensionType):
oid = ExtensionOID.INHIBIT_ANY_POLICY
def __init__(self, skip_certs: int) -> None:
if (not isinstance(skip_certs, int)):
raise TypeError('skip_certs must be an integer')
if (skip_certs < 0):
raise ValueError('skip_certs must be a non-negative integer')
self._skip_certs = skip_certs
def __repr__(self) -> str:
return f'<InhibitAnyPolicy(skip_certs={self.skip_certs})>'
def __eq__(self, other: object) -> bool:
if (not isinstance(other, InhibitAnyPolicy)):
return NotImplemented
return (self.skip_certs == other.skip_certs)
def __hash__(self) -> int:
return hash(self.skip_certs)
@property
def skip_certs(self) -> int:
return self._skip_certs
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self) |
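# The class above mirrors cryptography's x509.InhibitAnyPolicy; a quick check
# with the real package (assumes `cryptography` is installed):
from cryptography import x509

ext = x509.InhibitAnyPolicy(skip_certs=2)
print(ext.skip_certs)                   # 2
print(ext == x509.InhibitAnyPolicy(2))  # True: equality compares skip_certs |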
class FullyConnectedDotProject(Mapper):
def __init__(self, n_out, n_project, w_init='glorot_uniform', activation='relu', bias=True):
self.w_init = w_init
self.n_project = n_project
self.activation = activation
self.n_out = n_out
self.bias = bias
def apply(self, is_train, x, mask=None):
bias = ((self.bias is None) or self.bias)  # None means "use a bias by default"
return fully_connected(x, self.n_out, use_bias=bias, activation=get_keras_activation(self.activation), kernel_initializer=_wrap_init(initializers.get(self.w_init))) |
class _LayoutContext():
def __init__(self, layout, document, colors_iter, background_iter):
self.colors_iter = colors_iter
underline_iter = document.get_style_runs('underline')
self.decoration_iter = runlist.ZipRunIterator((background_iter, underline_iter))
self.baseline_iter = runlist.FilteredRunIterator(document.get_style_runs('baseline'), (lambda value: (value is not None)), 0) |
def imppid(args):
if (args['pid'] is None):
logging.error('A pid has to be selected')
else:
printT('Impersonating primary token of pid {0}'.format(args['pid']))
imp = Impersonate()
imp.enableAllUserRights()
status = imp.impersonateViaPID(pid=args['pid'])
if status:
printT('Trying to open a cmd shell...')
printT("NOTICE: If not enough privileges for targeted pid, you can't open a cmd.exe shell")
imp.printCurrentThreadEffectiveToken()
imp.enableAllUserRights()
imp.executeCMDWithThreadEffectiveToken()
else:
logging.error('Impossible to impersonate') |
def test_scene_to_svg_exporter_render_with_worker_canceled(view):
item = BeeTextItem('foo')
item.setPos(QtCore.QPointF(20, 30))
view.scene.addItem(item)
exporter = SceneToSVGExporter(view.scene)
exporter.size = QtCore.QSize(200, 400)
exporter.margin = 5
worker = MagicMock(canceled=True)
svg = exporter.render_to_svg(worker=worker)
assert (svg is None) |
def get_xception(model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
channels = [[128], [256], ([728] * 9), [1024]]
net = Xception(channels=channels, **kwargs)
if pretrained:
if (not model_name):
raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
from .model_store import download_model
download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
return net |
def launch_openroad():
global process
executable_path = os.path.abspath(os.path.join(os.getcwd(), '../../../../cmake-build-release/src'))
process = subprocess.Popen([f'{executable_path}/openroad', '-exit', '/home/plan/eda/OpenROAD/src/drt/test/results/ispd18_test1/run-net-ordering-train.tcl'], cwd=executable_path, stdout=subprocess.PIPE)
select_obj = select.poll()
select_obj.register(process.stdout, select.POLLIN)
while True:
ready = select_obj.poll(1000)
if ready:
output = process.stdout.readline().decode().strip()
print(output)
if (process.poll() is not None):
break
time.sleep(1) |
class FpnCombine(nn.Module):
def __init__(self, feature_info, fpn_config, fpn_channels, inputs_offsets, target_reduction, pad_type='', pooling_type='max', norm_layer=nn.BatchNorm2d, apply_bn_for_resampling=False, conv_after_downsample=False, redundant_bias=False, weight_method='attn'):
super(FpnCombine, self).__init__()
self.inputs_offsets = inputs_offsets
self.weight_method = weight_method
self.resample = nn.ModuleDict()
for (idx, offset) in enumerate(inputs_offsets):
in_channels = fpn_channels
if (offset < len(feature_info)):
in_channels = feature_info[offset]['num_chs']
input_reduction = feature_info[offset]['reduction']
else:
node_idx = (offset - len(feature_info))
input_reduction = fpn_config.nodes[node_idx]['reduction']
reduction_ratio = (target_reduction / input_reduction)
self.resample[str(offset)] = ResampleFeatureMap(in_channels, fpn_channels, reduction_ratio=reduction_ratio, pad_type=pad_type, pooling_type=pooling_type, norm_layer=norm_layer, apply_bn=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample, redundant_bias=redundant_bias)
if ((weight_method == 'attn') or (weight_method == 'fastattn')):
self.edge_weights = nn.Parameter(torch.ones(len(inputs_offsets)), requires_grad=True)
else:
self.edge_weights = None
def forward(self, x: List[torch.Tensor]):
dtype = x[0].dtype
nodes = []
for (offset, resample) in zip(self.inputs_offsets, self.resample.values()):
input_node = x[offset]
input_node = resample(input_node)
nodes.append(input_node)
if (self.weight_method == 'attn'):
normalized_weights = torch.softmax(self.edge_weights.to(dtype=dtype), dim=0)
out = (torch.stack(nodes, dim=(- 1)) * normalized_weights)
elif (self.weight_method == 'fastattn'):
edge_weights = nn.functional.relu(self.edge_weights.to(dtype=dtype))
weights_sum = torch.sum(edge_weights)
out = torch.stack([((nodes[i] * edge_weights[i]) / (weights_sum + 0.0001)) for i in range(len(nodes))], dim=(- 1))
elif (self.weight_method == 'sum'):
out = torch.stack(nodes, dim=(- 1))
else:
raise ValueError('unknown weight_method {}'.format(self.weight_method))
out = torch.sum(out, dim=(- 1))
return out |
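# Toy illustration of the 'fastattn' fusion used in FpnCombine.forward above:
# ReLU'd edge weights normalised by their sum (plus a small epsilon), then a
# weighted sum over the input nodes. Standalone, torch only.
import torch

nodes = [torch.ones(2, 2), 2 * torch.ones(2, 2)]
edge_weights = torch.nn.functional.relu(torch.tensor([0.5, 1.5]))
weights_sum = torch.sum(edge_weights)
out = torch.stack([(nodes[i] * edge_weights[i]) / (weights_sum + 0.0001) for i in range(len(nodes))], dim=-1)
print(torch.sum(out, dim=-1))  # ~ (0.25 * 1 + 0.75 * 2) = 1.75 everywhere |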
def load_zip_file_keys(file, fileNameRegExp=''):
try:
archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
except Exception:
raise Exception('Error loading the ZIP archive.')
pairs = []
for name in archive.namelist():
addFile = True
keyName = name
if (fileNameRegExp != ''):
m = re.match(fileNameRegExp, name)
if (m is None):
addFile = False
elif (len(m.groups()) > 0):
keyName = m.group(1)
if addFile:
pairs.append(keyName)
return pairs |
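# Quick demo for load_zip_file_keys above (assumes it and the `re`/`zipfile`
# imports it relies on are in scope): build an in-memory archive and extract
# the numeric keys via the capture group.
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as z:
    z.writestr('gt_img_1.txt', 'a')
    z.writestr('gt_img_2.txt', 'b')
print(load_zip_file_keys(buf, r'gt_img_([0-9]+).txt'))  # ['1', '2'] |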
@dataclass(frozen=True)
class ReFieldNameRC(LocatedRequestChecker):
LOCATION = FieldLoc
pattern: Pattern[str]
def _check_location(self, mediator: DirectMediator, loc: FieldLoc) -> None:
if self.pattern.fullmatch(loc.field_id):
return
raise CannotProvide(f'field_id must be matched by {self.pattern!r}') |
class StreamBlocksAdminMixin():
change_form_template = 'streamfield/admin/change_form.html'
popup_response_template = 'streamfield/admin/streamfield_popup_response.html'
def response_add(self, request, obj, post_url_continue=None):
if ('block_id' in request.POST):
opts = obj._meta
to_field = request.POST.get(TO_FIELD_VAR)
attr = (str(to_field) if to_field else opts.pk.attname)
value = obj.serializable_value(attr)
popup_response_data = json.dumps({'app_id': request.POST.get('app_id'), 'block_id': request.POST.get('block_id'), 'instance_id': str(value)})
return TemplateResponse(request, self.popup_response_template, {'popup_response_data': popup_response_data})
return super().response_add(request, obj, post_url_continue)
def response_change(self, request, obj):
if ('block_id' in request.POST):
opts = obj._meta
to_field = request.POST.get(TO_FIELD_VAR)
attr = (str(to_field) if to_field else opts.pk.attname)
value = request.resolver_match.kwargs['object_id']
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps({'action': 'change', 'app_id': request.POST.get('app_id'), 'block_id': request.POST.get('block_id'), 'instance_id': request.POST.get('instance_id')})
return TemplateResponse(request, self.popup_response_template, {'popup_response_data': popup_response_data})
return super().response_change(request, obj)
def response_delete(self, request, obj_display, obj_id):
if ('block_id' in request.POST):
popup_response_data = json.dumps({'action': 'delete', 'value': str(obj_id), 'app_id': request.POST.get('app_id'), 'block_id': request.POST.get('block_id'), 'instance_id': request.POST.get('instance_id')})
return TemplateResponse(request, self.popup_response_template, {'popup_response_data': popup_response_data})
return super().response_delete(request, obj_display, obj_id) |
def _render_month(calendar, year, month, print_year):
import pandas as pd
if (sys.version_info[0] == 2):
import StringIO
out = StringIO.StringIO()
else:
import io
out = io.StringIO()
start = '{year}-{month}'.format(year=year, month=month)
if (month == 12):
end = '{year}-{month}'.format(year=(year + 1), month=1)
else:
end = '{year}-{month}'.format(year=year, month=(month + 1))
days = pd.date_range(start, end, closed='left')
title = months[(month - 1)]
if print_year:
title += ' {year}'.format(year=year)
print('{title:^28}'.format(title=title).rstrip(), file=out)
print(' Su Mo Tu We Th Fr Sa', file=out)
print((' ' * (4 * ((days[0].weekday() + 1) % 7))), end='', file=out)
for d in days:
if (d.weekday() == 6):
print('', file=out)
if calendar.is_session(d):
a = b = ' '
else:
a = '['
b = ']'
print('{a}{d.day:>2}{b}'.format(a=a, d=d, b=b), end='', file=out)
print('', file=out)
return out.getvalue() |
def gen_sqlalchemy_metadata(peewee_model_list, legacy_index_map=None):
metadata = MetaData(naming_convention={'ix': 'ix_%(column_0_label)s', 'uq': 'uq_%(table_name)s_%(column_0_name)s', 'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s', 'pk': 'pk_%(table_name)s'})
for model in peewee_model_list:
meta = model._meta
all_indexes = set(meta.indexes)
fulltext_indexes = []
columns = []
for field in meta.sorted_fields:
alchemy_type = None
col_args = []
col_kwargs = {}
if isinstance(field, PrimaryKeyField):
alchemy_type = Integer
elif isinstance(field, CharField):
alchemy_type = String(field.max_length)
elif isinstance(field, BooleanField):
alchemy_type = Boolean
elif isinstance(field, DateTimeField):
alchemy_type = DateTime
elif isinstance(field, DateField):
alchemy_type = Date
elif isinstance(field, TextField):
alchemy_type = Text
elif isinstance(field, ForeignKeyField):
alchemy_type = Integer
all_indexes.add(((field.name,), field.unique))
if (not field.deferred):
target_name = ('%s.%s' % (field.rel_model._meta.table_name, field.rel_field.column_name))
col_args.append(ForeignKey(target_name))
elif isinstance(field, BigIntegerField):
alchemy_type = BigInteger
elif isinstance(field, IntegerField):
alchemy_type = Integer
else:
raise RuntimeError(('Unknown column type: %s' % field))
if hasattr(field, '__fulltext__'):
fulltext_indexes.append(field.name)
for option_name in OPTIONS_TO_COPY:
alchemy_option_name = (OPTION_TRANSLATIONS[option_name] if (option_name in OPTION_TRANSLATIONS) else option_name)
if (alchemy_option_name not in col_kwargs):
option_val = getattr(field, option_name)
col_kwargs[alchemy_option_name] = option_val
if (field.unique or field.index):
all_indexes.add(((field.name,), field.unique))
new_col = Column(field.column_name, alchemy_type, *col_args, **col_kwargs)
columns.append(new_col)
new_table = Table(meta.table_name, metadata, *columns)
for (col_prop_names, unique) in all_indexes:
col_names = [meta.fields[prop_name].column_name for prop_name in col_prop_names]
index_name = ('%s_%s' % (meta.table_name, '_'.join(col_names)))
col_refs = [getattr(new_table.c, col_name) for col_name in col_names]
Index(normalize_index_name(index_name, legacy_index_map), *col_refs, unique=unique)
for col_field_name in fulltext_indexes:
index_name = ('%s_%s__fulltext' % (meta.table_name, col_field_name))
col_ref = getattr(new_table.c, col_field_name)
Index(normalize_index_name(index_name, legacy_index_map), col_ref, postgresql_ops={col_field_name: 'gin_trgm_ops'}, postgresql_using='gin', mysql_prefix='FULLTEXT')
return metadata |
class RemoteLoader():
@staticmethod
def main(*argw) -> None:
remoteControl: RemoteControlWithUndo = RemoteControlWithUndo()
livingRoomLight: Light = Light('Living Room')
livingRoomLightOn: LightOnCommand = LightOnCommand(livingRoomLight)
livingRoomLightOff: LightOffCommand = LightOffCommand(livingRoomLight)
remoteControl.setCommand(0, livingRoomLightOn, livingRoomLightOff)
remoteControl.onButtonWasPushed(0)
remoteControl.offButtonWasPushed(0)
print(remoteControl.toString())
remoteControl.undoButtonWasPushed()
remoteControl.offButtonWasPushed(0)
remoteControl.onButtonWasPushed(0)
print(remoteControl.toString())
remoteControl.undoButtonWasPushed()
ceilingFan: CeilingFan = CeilingFan('Living Room')
ceilingFanMedium: CeilingFanMediumCommand = CeilingFanMediumCommand(ceilingFan)
ceilingFanHigh: CeilingFanHighCommand = CeilingFanHighCommand(ceilingFan)
ceilingFanOff: CeilingFanOffCommand = CeilingFanOffCommand(ceilingFan)
remoteControl.setCommand(0, ceilingFanMedium, ceilingFanOff)
remoteControl.setCommand(1, ceilingFanHigh, ceilingFanOff)
remoteControl.onButtonWasPushed(0)
remoteControl.offButtonWasPushed(0)
print(remoteControl.toString())
remoteControl.undoButtonWasPushed()
remoteControl.onButtonWasPushed(1)
print(remoteControl.toString())
remoteControl.undoButtonWasPushed() |
class SpaceTest(unittest.TestCase):
def setUp(self):
logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG)
def test_make_two_spaces(self):
log = logging.getLogger(__name__)
log.debug('test_make_two_spaces')
space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
root1 = space1.create_root_key()
space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
root2 = space2.create_root_key()
root3 = space2.create_root_key()
log.debug(('%08x' % root1))
log.debug(('%08x' % root2))
log.debug(('%08x' % root3))
def test_flush_context(self):
log = logging.getLogger(__name__)
log.debug('test_flush_context')
space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
root1 = space1.create_root_key()
log.debug(('%08x' % root1))
space1.flush_context(root1)
def test_get_handles(self):
log = logging.getLogger(__name__)
log.debug('test_get_handles')
space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
space1.create_root_key()
space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
space2.create_root_key()
space2.create_root_key()
handles = space2.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_TRANSIENT)
self.assertEqual(len(handles), 2)
log.debug(('%08x' % handles[0]))
log.debug(('%08x' % handles[1]))
def test_invalid_cc(self):
log = logging.getLogger(__name__)
log.debug(sys._getframe().f_code.co_name)
TPM2_CC_INVALID = (tpm2.TPM2_CC_FIRST - 1)
space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
root1 = space1.create_root_key()
log.debug(('%08x' % root1))
fmt = '>HII'
cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_INVALID)
rc = 0
try:
space1.send_cmd(cmd)
except ProtocolError as e:
rc = e.rc
self.assertEqual(rc, (tpm2.TPM2_RC_COMMAND_CODE | tpm2.TSS2_RESMGR_TPM_RC_LAYER)) |
@manager_config
def test_labelgroup(manager):
manager.c.group['a'].toscreen()
assert (manager.c.group['a'].info()['label'] == 'a')
manager.c.labelgroup()
manager.c.widget['prompt'].fake_keypress('b')
manager.c.widget['prompt'].fake_keypress('Return')
assert (manager.c.group['a'].info()['label'] == 'b')
manager.c.labelgroup()
manager.c.widget['prompt'].fake_keypress('Return')
assert (manager.c.group['a'].info()['label'] == 'a') |
class PersistentSearchControl(RequestControl):
class PersistentSearchControlValue(univ.Sequence):
componentType = namedtype.NamedTypes(namedtype.NamedType('changeTypes', univ.Integer()), namedtype.NamedType('changesOnly', univ.Boolean()), namedtype.NamedType('returnECs', univ.Boolean()))
controlType = '2.16.840.1.113730.3.4.3'
def __init__(self, criticality=True, changeTypes=None, changesOnly=False, returnECs=True):
(self.criticality, self.changesOnly, self.returnECs) = (criticality, changesOnly, returnECs)
self.changeTypes = (changeTypes or CHANGE_TYPES_INT.values())
def encodeControlValue(self):
if (not isinstance(self.changeTypes, int)):
changeTypes_int = 0
for ct in self.changeTypes:
changeTypes_int = (changeTypes_int | CHANGE_TYPES_INT.get(ct, ct))
self.changeTypes = changeTypes_int
p = self.PersistentSearchControlValue()
p.setComponentByName('changeTypes', univ.Integer(self.changeTypes))
p.setComponentByName('changesOnly', univ.Boolean(self.changesOnly))
p.setComponentByName('returnECs', univ.Boolean(self.returnECs))
return encoder.encode(p) |
def get_size_during_upload(repo_id: int):
query = BlobUpload.select(fn.Sum(BlobUpload.byte_count).alias('size_bytes')).where((BlobUpload.repository_id == repo_id)).get()
repo_size = get_repository_size(repo_id)
size_bytes = (query.size_bytes if (query.size_bytes is not None) else 0)
return (repo_size + size_bytes) |
def get_parser():
parser = argparse.ArgumentParser(description='transforms features via a given pca and stored them in target dir')
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--pca-path', type=str, help='pca location. will append _A.npy and _b.npy', required=True)
parser.add_argument('--batch-size', type=int, default=2048000, help='batch size')
parser.add_argument('--unfiltered', action='store_true', help='process the unfiltered version')
return parser |
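# Usage sketch for get_parser above (assumes it is in scope); all required
# flags must be supplied, and the paths here are placeholders.
args = get_parser().parse_args(['feats_dir', '--split', 'train', '--save-dir', 'out', '--pca-path', 'pca/512'])
print(args.split, args.batch_size)  # 'train' 2048000 |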
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
if (jax.process_index() == 0):
wandb.init(project='lang-reward', group=os.environ.get('WANDB_TASK'), config={**vars(model_args), **vars(data_args), **vars(training_args)})
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger.setLevel((logging.INFO if (jax.process_index() == 0) else logging.ERROR))
if (jax.process_index() == 0):
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
logger.info(f'Training/evaluation parameters {training_args}')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
dataset = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False, use_auth_token=(True if model_args.use_auth_token else None))
if ('validation' not in dataset.keys()):
dataset['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
dataset['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
else:
data_files = {}
dataset_args = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.train_file.split('.')[(- 1)]
if (extension == 'txt'):
extension = 'text'
dataset_args['keep_linebreaks'] = data_args.keep_linebreaks
dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, **dataset_args, use_auth_token=(True if model_args.use_auth_token else None))
if ('validation' not in dataset.keys()):
print(f'Validation not in dataset.keys(); creating validation split by splitting train by {data_args.validation_split_percentage}%')
dataset['validation'] = load_dataset(extension, data_files=data_files, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, **dataset_args, use_auth_token=(True if model_args.use_auth_token else None))
dataset['train'] = load_dataset(extension, data_files=data_files, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, **dataset_args, use_auth_token=(True if model_args.use_auth_token else None))
if model_args.config_name:
config = CodeGenRLConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
elif model_args.model_name_or_path:
config = CodeGenRLConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, use_auth_token=(True if model_args.use_auth_token else None))
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, use_auth_token=(True if model_args.use_auth_token else None))
else:
raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
if (tokenizer.pad_token is None):
tokenizer.pad_token = tokenizer.eos_token
logger.info('Setting `pad_token` to `eos_token`:{} for open-end generation.'.format(tokenizer.eos_token))
model: FlaxCodeGenRLForCausalLM = FlaxCodeGenRLForCausalLM(config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), _do_init=False)
if data_args.is_reward_model:
assert config.is_reward_model, 'config.is_reward_model must be True when data_args.is_reward_model is True'
if training_args.do_train:
column_names = dataset['train'].column_names
else:
column_names = dataset['validation'].column_names
text_column_name = ('text' if ('text' in column_names) else column_names[0])
tok_logger = transformers.utils.logging.get_logger('transformers.tokenization_utils_base')
num_epochs = int(training_args.num_train_epochs)
n_replica = (jax.device_count() // model_args.num_partitions)
train_batch_size = (int(training_args.per_replica_batch_size) * n_replica)
eval_batch_size = train_batch_size
def tokenize_function(examples, batch_size=train_batch_size):
with CaptureLogger(tok_logger) as cl:
if config.is_reward_model:
text_a = examples['text_a']
text_a = [f'{tokenizer.bos_token}{txt}{tokenizer.eos_token}' for txt in text_a]
text_b = examples['text_b']
text = [f'{txt_a}{txt_b}' for (txt_a, txt_b) in zip(text_a, text_b)]
reward_offset_mapping: List[Mapping[(Tuple[(int, int)], Union[(float, None)])]] = [{tuple(map(int, k[1:(- 1)].split(','))): v for (k, v) in json.loads(mapping_str).items()} for mapping_str in examples['reward_offset_mapping']]
prefix_length = [len(txt) for txt in text_a]
reward_offset_mapping = [{((k[0] + cur_prefix_len), (k[1] + cur_prefix_len)): v for (k, v) in offset_mapping.items()} for (offset_mapping, cur_prefix_len) in zip(reward_offset_mapping, prefix_length)]
output = tokenizer(text, padding=True, truncation=True, return_tensors='np', pad_to_multiple_of=256, return_offsets_mapping=True)
output = dict(output)
else:
text = examples[text_column_name]
text = [f'{tokenizer.bos_token}{txt}{tokenizer.eos_token}' for txt in text]
output = tokenizer(text, padding=True, truncation=True, return_tensors='np', pad_to_multiple_of=256)
output['id'] = examples['id']
num_examples = len(output['id'])
if config.is_reward_model:
(reward_mask, reward_val) = get_reward_mask_and_value(shape=output['input_ids'].shape, token_offset_mapping=output['offset_mapping'], reward_offset_mapping=reward_offset_mapping)
output['reward_mask'] = reward_mask
output['reward_val'] = reward_val
output.pop('offset_mapping')
else:
output['labels'] = output['input_ids'].copy()
output['labels'][np.where((output['attention_mask'] == 0))] = (- 100)
output = pad_to_batch_size(output, num_examples=num_examples, batch_size=batch_size, tokenizer=tokenizer)
output['num_examples'] = np.array(([num_examples] * batch_size), dtype=np.int32)
output['num_tokens'] = np.array(np.sum(output['attention_mask'], axis=1), dtype=np.int32)
return output
dataset['train'] = dataset['train'].shuffle(seed=training_args.seed)
tokenized_datasets = dataset.map(tokenize_function, batched=True, batch_size=train_batch_size, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')
lm_datasets = tokenized_datasets
if config.is_reward_model:
examples = tokenized_datasets['train'].select(range(3))
for ex in examples:
print(f"Visualizing reward model example {ex['id']}")
visualize_rm_example(**ex, tokenizer=tokenizer)
if training_args.do_train:
if ('train' not in tokenized_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = lm_datasets['train']
if (data_args.max_train_samples is not None):
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
seq_lens = np.array([len(x) for x in train_dataset['input_ids']])
print('Train dataset seq len distribution:')
(seq_lens, len_count) = np.unique(seq_lens, return_counts=True)
for i in range(len(seq_lens)):
print(f'- {seq_lens[i]}: {len_count[i]}')
if training_args.do_eval:
if ('validation' not in tokenized_datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = lm_datasets['validation']
if (data_args.max_eval_samples is not None):
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
seq_lens = np.array([len(x) for x in eval_dataset['input_ids']])
print('Eval dataset seq len distribution:')
(seq_lens, len_count) = np.unique(seq_lens, return_counts=True)
for i in range(len(seq_lens)):
print(f'- {seq_lens[i]}: {len_count[i]}')
rng = jax.random.PRNGKey(training_args.seed)
linear_decay_lr_schedule_fn = create_learning_rate_fn(len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, training_args.learning_rate_end)
if (training_args.learning_rate == training_args.learning_rate_end):
logger.info(f'Using constant learning rate of {training_args.learning_rate}')
def decay_mask_fn(params):
flat_params = flax.traverse_util.flatten_dict(params)
layer_norm_candidates = ['layernorm', 'layer_norm', 'ln']
layer_norm_named_params = set([layer[(- 2):] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if (layer_norm_name in ''.join(layer).lower())])
flat_mask = {path: ((path[(- 1)] != 'bias') and (path[(- 2):] not in layer_norm_named_params)) for path in flat_params}
return flax.traverse_util.unflatten_dict(flat_mask)
optimizer = t5x_optimizers.adamw(learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn)
logical_axis_rules_full = [('batch', 'data'), ('mlp', 'model'), ('heads', 'model'), ('vocab', 'model'), ('embed', 'model'), ('embed', 'data'), ('kv', None), ('joined_kv', None), ('relpos_buckets', None), ('abspos_buckets', None), ('length', None), ('layers', None), ('stack', None), ('mlp_activations', None)]
partitioner = partitioning.PjitPartitioner(num_partitions=model_args.num_partitions, logical_axis_rules=logical_axis_rules_full)
def _init_variables(rng: jax.random.KeyArray, *unused_args, **unused_kwargs) -> flax.core.scope.FrozenVariableDict:
initial_vars = model.init_weights(rng, input_shape=(1, 1))
return initial_vars
train_state_initializer = t5x_utils.TrainStateInitializer(optimizer_def=optimizer, partitioner=partitioner, init_fn=_init_variables, input_shapes=None, input_types=None)
checkpoint_cfg = t5x_utils.CheckpointConfig(save=t5x_utils.SaveCheckpointConfig(dtype='bfloat16', period=training_args.save_steps), restore=t5x_utils.RestoreCheckpointConfig(path=training_args.gs_output_dir, mode='latest', dtype='bfloat16'))
(valid_restore_cfg, restore_paths) = t5x_utils.get_first_valid_restore_config_and_paths([checkpoint_cfg.restore])
checkpoint_manager = t5x_utils.LegacyCheckpointManager(save_cfg=checkpoint_cfg.save, restore_cfg=valid_restore_cfg, train_state_shape=train_state_initializer.global_train_state_shape, partitioner=partitioner, ds_iter=None, model_dir=training_args.gs_output_dir)
(rng, fallback_init_rng) = jax.random.split(rng)
train_state: t5x_train_state_lib.TrainState = checkpoint_manager.restore(restore_paths, restore_cfg=valid_restore_cfg, fallback_state=train_state_initializer.from_scratch(fallback_init_rng).state_dict())
assert (train_state is not None), 'Failed to restore train state'
(rng, dropout_rng) = jax.random.split(rng)
loss_fn_w_model = functools.partial(loss_fn, model=model)
def train_step(state: t5x_train_state_lib.TrainState, batch, dropout_rng=dropout_rng):
(loss, grad, metrics) = accumulate_grads_microbatched(loss_fn_w_model, train_state=state, batch=batch, data_partition_spec=partitioner.data_partition_spec, dropout_rng=dropout_rng, num_microbatches=training_args.num_microbatches)
num_tokens = batch['num_tokens'].sum()
def _normalize(loss, grad, metrics, num_tokens):
loss = (loss / num_tokens)
grad = jax.tree_map((lambda x: (x / num_tokens)), grad)
metrics = jax.tree_map((lambda x: (x / num_tokens)), metrics)
return (loss, grad, metrics)
(loss, grad, metrics) = jax.lax.cond((num_tokens > 0), (lambda x: _normalize(*x)), (lambda x: x[:(- 1)]), (loss, grad, metrics, num_tokens))
new_state = state.apply_gradient(grads=grad, learning_rate=None)
cur_lr = linear_decay_lr_schedule_fn(state.step)
metrics = {'lm_loss': loss, 'learning_rate': cur_lr, 'num_tokens': num_tokens, **metrics}
return (new_state, metrics)
def eval_step(state: t5x_train_state_lib.TrainState, batch):
(loss, _, metrics) = accumulate_grads_microbatched(functools.partial(loss_fn_w_model, train=False), train_state=state, batch=batch, data_partition_spec=partitioner.data_partition_spec, dropout_rng=dropout_rng, num_microbatches=training_args.num_microbatches, loss_only=True)
metrics = {'lm_loss': loss, 'num_tokens': batch['num_tokens'].sum(), **metrics}
return metrics
p_train_step = partitioner.partition(train_step, in_axis_resources=(train_state_initializer.train_state_axes, partitioner.data_partition_spec), out_axis_resources=(train_state_initializer.train_state_axes, None), donate_argnums=(0,))
p_eval_step = partitioner.partition(eval_step, in_axis_resources=(train_state_initializer.train_state_axes, partitioner.data_partition_spec), out_axis_resources=None)
steps_per_epoch = (len(train_dataset) // train_batch_size)
total_train_steps = (steps_per_epoch * num_epochs)
def eval_epoch(p_eval_step, train_state, eval_dataset, eval_batch_size):
eval_metrics = None
eval_loader = dataloader(eval_dataset, eval_batch_size)
eval_steps = math.ceil((len(eval_dataset) / eval_batch_size))
for _ in tqdm(range(eval_steps), desc='Evaluating...', position=2, leave=False):
batch = next(eval_loader)
metrics = p_eval_step(train_state, batch)
if (eval_metrics is None):
eval_metrics = metrics
else:
eval_metrics = jax.tree_map((lambda x, y: (x + y)), eval_metrics, metrics)
eval_metrics['lm_loss'] = (eval_metrics['lm_loss'] / eval_metrics['num_tokens'])
try:
eval_metrics['perplexity'] = math.exp(eval_metrics['lm_loss'])
except OverflowError:
eval_metrics['perplexity'] = float('inf')
return eval_metrics
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num Epochs = {num_epochs}')
logger.info(f' Num of devices = {jax.device_count()}')
logger.info(f' Num of replicas = {n_replica}')
logger.info(f' Num of partitions = {model_args.num_partitions}')
logger.info(f' Num of microbatches = {training_args.num_microbatches}')
logger.info(f' Instantaneous batch size per replica = {training_args.per_replica_batch_size}')
logger.info(f' Total train batch size (w. parallel & distributed) = {train_batch_size}')
logger.info(f' Total optimization steps = {total_train_steps}')
train_time = 0
epochs = tqdm(range(num_epochs), desc='Epoch ... ', position=0)
steps_per_epoch = math.ceil((len(train_dataset) / train_batch_size))
if (training_args.eval_steps is None):
training_args.eval_steps = steps_per_epoch
if (training_args.save_steps is None):
training_args.save_steps = steps_per_epoch
if training_args.do_eval:
eval_metrics = eval_epoch(p_eval_step, train_state, eval_dataset, eval_batch_size)
desc = f"Step... ({0} | Eval Loss: {eval_metrics['lm_loss']} | Eval Perplexity: {eval_metrics['perplexity']})"
epochs.write(desc)
epochs.desc = desc
if (jax.process_index() == 0):
wandb.log({'eval/lm_loss': eval_metrics['lm_loss'], 'eval/perplexity': eval_metrics['perplexity']}, step=0)
for epoch in epochs:
train_start = time.time()
train_loader = dataloader(train_dataset, train_batch_size)
for step in tqdm(range(1, (steps_per_epoch + 1)), desc='Training...', position=1, leave=False):
batch = next(train_loader)
(train_state, train_metric) = p_train_step(train_state, batch)
cur_step = ((epoch * steps_per_epoch) + step)
if (((cur_step % training_args.logging_steps) == 0) and (cur_step > 0)):
if (jax.process_index() == 0):
train_time += (time.time() - train_start)
wandb.log({'train/lm_loss': train_metric['lm_loss'], 'train/learning_rate': train_metric['learning_rate'], 'train/time': train_time}, step=cur_step)
epochs.write(f"Step... ({cur_step} | Loss: {train_metric['lm_loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})")
if (training_args.do_eval and ((cur_step % training_args.eval_steps) == 0) and (cur_step > 0)):
eval_metrics = eval_epoch(p_eval_step, train_state, eval_dataset, eval_batch_size)
desc = f"Step... ({cur_step} | Eval Loss: {eval_metrics['lm_loss']} | Eval Perplexity: {eval_metrics['perplexity']})"
epochs.write(desc)
epochs.desc = desc
if (jax.process_index() == 0):
wandb.log({'eval/lm_loss': eval_metrics['lm_loss'], 'eval/perplexity': eval_metrics['perplexity']}, step=cur_step)
if (((cur_step % training_args.save_steps) == 0) and (cur_step > 0)):
if (jax.process_index() == 0):
checkpoint_manager.save(train_state)
if training_args.do_eval:
eval_metrics = eval_epoch(p_eval_step, train_state, eval_dataset, eval_batch_size)
if (jax.process_index() == 0):
eval_metrics = {f'final_eval/{metric_name}': value for (metric_name, value) in eval_metrics.items()}
path = os.path.join(training_args.gs_output_dir, 'eval_results.json')
wandb.log(eval_metrics, step=cur_step)
with tf.io.gfile.GFile(path, 'w') as f:
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, jax.Array):
obj = np.array(obj)
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyEncoder, self).default(obj)
json.dump(eval_metrics, f, indent=4, sort_keys=True, cls=NumpyEncoder) |
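# The NumpyEncoder defined inside main() above is a generally useful pattern:
# json cannot serialise numpy scalars or arrays, so a custom JSONEncoder
# converts them first. Standalone sketch without the jax branch.
import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

print(json.dumps({'loss': np.float32(0.25), 'steps': np.int64(10), 'hist': np.arange(3)}, cls=NumpyEncoder)) |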
def _create_playlists() -> None:
local_files = ArchivedSong.objects.filter(url__startswith='local_library').count()
library_link = os.path.join(conf.SONGS_CACHE_DIR, 'local_library')
library_path = os.path.abspath(library_link)
logging.info('started creating playlists in %s', library_path)
_set_scan_progress(f'{local_files} / 0 / 0')
scan_start = time.time()
last_update = scan_start
files_processed = 0
files_added = 0
def _scan_folder(dirpath: str) -> List[str]:
nonlocal last_update, files_processed, files_added
now = time.time()
if ((now - last_update) > UPDATE_FREQUENCY):
last_update = now
_set_scan_progress(f'{local_files} / {files_processed} / {files_added}')
song_urls = []
for filename in sorted(os.listdir(dirpath)):
path = os.path.join(dirpath, filename)
if os.path.isdir(path):
song_urls.extend(_scan_folder(path))
continue
library_relative_path = path[(len(library_path) + 1):]
external_url = os.path.join('local_library', library_relative_path)
if ArchivedSong.objects.filter(url=external_url).exists():
files_processed += 1
song_urls.append(external_url)
if (not song_urls):
return []
playlist_id = os.path.join('local_library', dirpath[(len(library_path) + 1):])
playlist_title = os.path.split(dirpath)[1]
(playlist, created) = ArchivedPlaylist.objects.get_or_create(list_id=playlist_id, title=playlist_title, counter=0)
if (not created):
return song_urls
song_index = 0
for external_url in song_urls:
PlaylistEntry.objects.create(playlist=playlist, index=song_index, url=external_url)
files_added += 1
song_index += 1
return song_urls
_scan_folder(library_path)
_set_scan_progress(f'{local_files} / {files_processed} / {files_added}') |
class vec3():
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __str__(self):
return f'({self.x}, {self.y}, {self.z})'
def __add__(self, v):
if isinstance(v, vec3):
return vec3((self.x + v.x), (self.y + v.y), (self.z + v.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((self.x + v), (self.y + v), (self.z + v))
def __radd__(self, v):
if isinstance(v, vec3):
return vec3((self.x + v.x), (self.y + v.y), (self.z + v.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((self.x + v), (self.y + v), (self.z + v))
def __sub__(self, v):
if isinstance(v, vec3):
return vec3((self.x - v.x), (self.y - v.y), (self.z - v.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((self.x - v), (self.y - v), (self.z - v))
def __rsub__(self, v):
if isinstance(v, vec3):
return vec3((v.x - self.x), (v.y - self.y), (v.z - self.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((v - self.x), (v - self.y), (v - self.z))
def __mul__(self, v):
if isinstance(v, vec3):
return vec3((self.x * v.x), (self.y * v.y), (self.z * v.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((self.x * v), (self.y * v), (self.z * v))
def __rmul__(self, v):
if isinstance(v, vec3):
return vec3((v.x * self.x), (v.y * self.y), (v.z * self.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((v * self.x), (v * self.y), (v * self.z))
def __truediv__(self, v):
if isinstance(v, vec3):
return vec3((self.x / v.x), (self.y / v.y), (self.z / v.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((self.x / v), (self.y / v), (self.z / v))
def __rtruediv__(self, v):
if isinstance(v, vec3):
return vec3((v.x / self.x), (v.y / self.y), (v.z / self.z))
elif (isinstance(v, numbers.Number) or isinstance(v, np.ndarray)):
return vec3((v / self.x), (v / self.y), (v / self.z))
def __abs__(self):
return vec3(np.abs(self.x), np.abs(self.y), np.abs(self.z))
def real(v):
return vec3(np.real(v.x), np.real(v.y), np.real(v.z))
def imag(v):
return vec3(np.imag(v.x), np.imag(v.y), np.imag(v.z))
def yzx(self):
return vec3(self.y, self.z, self.x)
def xyz(self):
return vec3(self.x, self.y, self.z)
def zxy(self):
return vec3(self.z, self.x, self.y)
def average(self):
return (((self.x + self.y) + self.z) / 3)
def matmul(self, matrix):
if isinstance(self.x, numbers.Number):
return array_to_vec3(np.dot(matrix, self.to_array()))
elif isinstance(self.x, np.ndarray):
return array_to_vec3(np.tensordot(matrix, self.to_array(), axes=[1, 0]))
def change_basis(self, new_basis):
return vec3(self.dot(new_basis[0]), self.dot(new_basis[1]), self.dot(new_basis[2]))
def __pow__(self, a):
return vec3((self.x ** a), (self.y ** a), (self.z ** a))
def dot(self, v):
return (((self.x * v.x) + (self.y * v.y)) + (self.z * v.z))
def exp(v):
return vec3(np.exp(v.x), np.exp(v.y), np.exp(v.z))
def sqrt(v):
return vec3(np.sqrt(v.x), np.sqrt(v.y), np.sqrt(v.z))
def to_array(self):
return np.array([self.x, self.y, self.z])
def cross(self, v):
return vec3(((self.y * v.z) - (self.z * v.y)), (((- self.x) * v.z) + (self.z * v.x)), ((self.x * v.y) - (self.y * v.x)))
def length(self):
return np.sqrt(self.dot(self))
def square_length(self):
return self.dot(self)
def normalize(self):
mag = self.length()
return (self * (1.0 / np.where((mag == 0), 1, mag)))
def components(self):
return (self.x, self.y, self.z)
def extract(self, cond):
return vec3(extract(cond, self.x), extract(cond, self.y), extract(cond, self.z))
@staticmethod
def where(cond, out_true, out_false):
return vec3(np.where(cond, out_true.x, out_false.x), np.where(cond, out_true.y, out_false.y), np.where(cond, out_true.z, out_false.z))
def select(mask_list, out_list):
out_list_x = [i.x for i in out_list]
out_list_y = [i.y for i in out_list]
out_list_z = [i.z for i in out_list]
return vec3(np.select(mask_list, out_list_x), np.select(mask_list, out_list_y), np.select(mask_list, out_list_z))
def clip(self, min, max):
return vec3(np.clip(self.x, min, max), np.clip(self.y, min, max), np.clip(self.z, min, max))
def place(self, cond):
r = vec3(np.zeros(cond.shape), np.zeros(cond.shape), np.zeros(cond.shape))
np.place(r.x, cond, self.x)
np.place(r.y, cond, self.y)
np.place(r.z, cond, self.z)
return r
def repeat(self, n):
return vec3(np.repeat(self.x, n), np.repeat(self.y, n), np.repeat(self.z, n))
def reshape(self, *newshape):
return vec3(self.x.reshape(*newshape), self.y.reshape(*newshape), self.z.reshape(*newshape))
def shape(self):
if isinstance(self.x, numbers.Number):
return 1
elif isinstance(self.x, np.ndarray):
return self.x.shape
def mean(self, axis):
return vec3(np.mean(self.x, axis=axis), np.mean(self.y, axis=axis), np.mean(self.z, axis=axis))
def __eq__(self, other):
return (((self.x == other.x) & (self.y == other.y)) & (self.z == other.z)) |
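# Hedged usage sketch (not part of the original source): the vec3 class above keeps each
# component as either a scalar or a numpy array, so a single instance can hold a whole
# batch of vectors (structure-of-arrays style) and every operator broadcasts component-wise.
import numpy as np
p = vec3(np.array([3.0, 0.0]), np.array([0.0, 4.0]), np.array([4.0, 3.0]))
assert np.allclose(p.length(), [5.0, 5.0])
d = p.normalize()
assert np.allclose(d.length(), [1.0, 1.0])
q = (p + 1.0) * 2.0
assert np.allclose(q.x, [8.0, 2.0]) |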
class GELUActivation(nn.Module):
def __init__(self, use_gelu_python: bool=False):
super().__init__()
if use_gelu_python:
self.act = self._gelu_python
else:
self.act = nn.functional.gelu
def _gelu_python(self, input: Tensor) -> Tensor:
return ((input * 0.5) * (1.0 + torch.erf((input / math.sqrt(2.0)))))
def forward(self, input: Tensor) -> Tensor:
return self.act(input) |
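# Hedged sanity check (not from the original source): the pure-Python branch is the exact
# erf-based GELU, so it should match nn.functional.gelu to floating-point tolerance.
import torch
x = torch.randn(4)
act = GELUActivation(use_gelu_python=True)
assert torch.allclose(act(x), torch.nn.functional.gelu(x), atol=1e-06) |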
def _setup_ipython(ipython: Any=None) -> Any:
if scooby.in_ipython():
from IPython import get_ipython
ipython = get_ipython()
ipython.run_line_magic('gui', 'qt')
from IPython.external.qt_for_kernel import QtGui
QtGui.QApplication.instance()
return ipython |
class DCUN_TFC_FiLM_LaSAFT_Framework(DenseCUNet_FiLM_Framework):
def __init__(self, n_fft, hop_length, num_frame, spec_type, spec_est_mode, optimizer, lr, auto_lr_schedule, train_loss, val_loss, **kwargs):
valid_kwargs = inspect.signature(DCUN_TFC_FiLM_LaSAFT.__init__).parameters
tfc_net_kwargs = dict(((name, kwargs[name]) for name in valid_kwargs if (name in kwargs)))
tfc_net_kwargs['n_fft'] = n_fft
spec2spec = DCUN_TFC_FiLM_LaSAFT(**tfc_net_kwargs)
train_loss_ = get_conditional_loss(train_loss, n_fft, hop_length, **kwargs)
val_loss_ = get_conditional_loss(val_loss, n_fft, hop_length, **kwargs)
super(DCUN_TFC_FiLM_LaSAFT_Framework, self).__init__(n_fft, hop_length, num_frame, spec_type, spec_est_mode, spec2spec, optimizer, lr, auto_lr_schedule, train_loss_, val_loss_)
valid_kwargs = inspect.signature(DCUN_TFC_FiLM_LaSAFT_Framework.__init__).parameters
hp = [key for key in valid_kwargs.keys() if (key not in ['self', 'kwargs'])]
hp = (hp + [key for key in kwargs if (not callable(kwargs[key]))])
self.save_hyperparameters(*hp) |
def find_models_missing_data():
models_missing_data = set()
for one_model in all_models:
if (one_model in appr_classes):
continue
try:
one_model.select().get()
except one_model.DoesNotExist:
if ((one_model.__name__ not in WHITELISTED_EMPTY_MODELS) and (not is_deprecated_model(one_model))):
models_missing_data.add(one_model.__name__)
return models_missing_data |
class DriverAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
driver = getattr(drivers, values.lower())
namespace.selenium_host = (namespace.selenium_host or getattr(driver, 'HOST', None))
namespace.selenium_port = (namespace.selenium_port or getattr(driver, 'PORT', None)) |
def convert_probability_to_call(ds: Dataset, call_genotype_probability: Hashable=variables.call_genotype_probability, threshold: float=0.9, merge: bool=True) -> Dataset:
from .conversion_numba_fns import _convert_probability_to_call
if (not (0 <= threshold <= 1)):
raise ValueError(f'Threshold must be float in [0, 1], not {threshold}.')
variables.validate(ds, {call_genotype_probability: variables.call_genotype_probability_spec})
if (ds.sizes['genotypes'] != 3):
raise NotImplementedError(f"Hard call conversion only supported for diploid, biallelic genotypes; num genotypes in provided probabilities array = {ds.sizes['genotypes']}.")
GP = da.asarray(ds[call_genotype_probability])
if (len(GP.chunks[2]) > 1):
GP = GP.rechunk((None, None, (- 1)))
K = da.empty(2, dtype=np.uint8)
GT = _convert_probability_to_call(GP, K, threshold)
new_ds = create_dataset({variables.call_genotype: (('variants', 'samples', 'ploidy'), GT), variables.call_genotype_mask: (('variants', 'samples', 'ploidy'), (GT < 0))})
return conditional_merge_datasets(ds, new_ds, merge) |
@is_pipeline_test
class ConversationalPipelineTests(unittest.TestCase):
model_mapping = dict((list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else (([] + list(MODEL_FOR_CAUSAL_LM_MAPPING.items())) if MODEL_FOR_CAUSAL_LM_MAPPING else [])))
tf_model_mapping = dict((list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else (([] + list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.items())) if TF_MODEL_FOR_CAUSAL_LM_MAPPING else [])))
def get_test_pipeline(self, model, tokenizer, processor):
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
return (conversation_agent, [Conversation('Hi there!')])
def run_pipeline_test(self, conversation_agent, _):
outputs = conversation_agent(Conversation('Hi there!'))
self.assertEqual(outputs, Conversation(past_user_inputs=['Hi there!'], generated_responses=[ANY(str)]))
outputs = conversation_agent([Conversation('Hi there!')])
self.assertEqual(outputs, Conversation(past_user_inputs=['Hi there!'], generated_responses=[ANY(str)]))
conversation_1 = Conversation('Going to the movies tonight - any suggestions?')
conversation_2 = Conversation("What's the last book you have read?")
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
outputs = conversation_agent([conversation_1, conversation_2])
self.assertEqual(outputs, [conversation_1, conversation_2])
self.assertEqual(outputs, [Conversation(past_user_inputs=['Going to the movies tonight - any suggestions?'], generated_responses=[ANY(str)]), Conversation(past_user_inputs=["What's the last book you have read?"], generated_responses=[ANY(str)])])
conversation_2.add_user_input('Why do you recommend it?')
outputs = conversation_agent(conversation_2)
self.assertEqual(outputs, conversation_2)
self.assertEqual(outputs, Conversation(past_user_inputs=["What's the last book you have read?", 'Why do you recommend it?'], generated_responses=[ANY(str), ANY(str)]))
with self.assertRaises(ValueError):
conversation_agent('Hi there!')
with self.assertRaises(ValueError):
conversation_agent(Conversation())
with self.assertRaises(ValueError):
conversation_agent(conversation_2)
@require_torch
def test_integration_torch_conversation(self):
conversation_agent = pipeline(task='conversational', device=DEFAULT_DEVICE_NUM)
conversation_1 = Conversation('Going to the movies tonight - any suggestions?')
conversation_2 = Conversation("What's the last book you have read?")
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 1)
self.assertEqual(len(result[1].past_user_inputs), 1)
self.assertEqual(len(result[0].generated_responses), 1)
self.assertEqual(len(result[1].generated_responses), 1)
self.assertEqual(result[0].past_user_inputs[0], 'Going to the movies tonight - any suggestions?')
self.assertEqual(result[0].generated_responses[0], 'The Big Lebowski')
self.assertEqual(result[1].past_user_inputs[0], "What's the last book you have read?")
self.assertEqual(result[1].generated_responses[0], 'The Last Question')
conversation_2.add_user_input('Why do you recommend it?')
result = conversation_agent(conversation_2, do_sample=False, max_length=1000)
self.assertEqual(result, conversation_2)
self.assertEqual(len(result.past_user_inputs), 2)
self.assertEqual(len(result.generated_responses), 2)
self.assertEqual(result.past_user_inputs[1], 'Why do you recommend it?')
self.assertEqual(result.generated_responses[1], "It's a good book.")
@require_torch
def test_integration_torch_conversation_truncated_history(self):
conversation_agent = pipeline(task='conversational', min_length_for_response=24, device=DEFAULT_DEVICE_NUM)
conversation_1 = Conversation('Going to the movies tonight - any suggestions?')
self.assertEqual(len(conversation_1.past_user_inputs), 0)
result = conversation_agent(conversation_1, do_sample=False, max_length=36)
self.assertEqual(result, conversation_1)
self.assertEqual(len(result.past_user_inputs), 1)
self.assertEqual(len(result.generated_responses), 1)
self.assertEqual(result.past_user_inputs[0], 'Going to the movies tonight - any suggestions?')
self.assertEqual(result.generated_responses[0], 'The Big Lebowski')
conversation_1.add_user_input('Is it an action movie?')
result = conversation_agent(conversation_1, do_sample=False, max_length=36)
self.assertEqual(result, conversation_1)
self.assertEqual(len(result.past_user_inputs), 2)
self.assertEqual(len(result.generated_responses), 2)
self.assertEqual(result.past_user_inputs[1], 'Is it an action movie?')
self.assertEqual(result.generated_responses[1], "It's a comedy.")
@require_torch
def test_small_model_pt(self):
tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small')
model = AutoModelForCausalLM.from_pretrained('microsoft/DialoGPT-small')
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation = Conversation('hello')
output = conversation_agent(conversation)
self.assertEqual(output, Conversation(past_user_inputs=['hello'], generated_responses=['Hi']))
@require_tf
def test_small_model_tf(self):
tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small')
model = TFAutoModelForCausalLM.from_pretrained('microsoft/DialoGPT-small')
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation = Conversation('hello')
output = conversation_agent(conversation)
self.assertEqual(output, Conversation(past_user_inputs=['hello'], generated_responses=['Hi']))
@require_torch
def test_integration_torch_conversation_dialogpt_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small')
model = AutoModelForCausalLM.from_pretrained('microsoft/DialoGPT-small')
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation_1 = Conversation('hello')
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(inputs['input_ids'].tolist(), [[31373, 50256]])
conversation_2 = Conversation('how are you ?', past_user_inputs=['hello'], generated_responses=['Hi there!'])
inputs = conversation_agent.preprocess(conversation_2)
self.assertEqual(inputs['input_ids'].tolist(), [[31373, 50256, 17250, 612, 0, 50256, 4919, 389, 345, 5633, 50256]])
@require_torch
def test_integration_torch_conversation_blenderbot_400M_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained('facebook/blenderbot-400M-distill')
model = AutoModelForSeq2SeqLM.from_pretrained('facebook/blenderbot-400M-distill')
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation_1 = Conversation('hello')
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(inputs['input_ids'].tolist(), [[1710, 86, 2]])
conversation_1 = Conversation('I like lasagne.', past_user_inputs=['hello'], generated_responses=[" Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie."])
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(inputs['input_ids'].tolist(), [[1710, 86, 228, 228, 946, 304, 398, 6881, 558, 964, 38, 452, 315, 265, 6252, 452, 322, 968, 6884, 3146, 278, 306, 265, 617, 87, 388, 75, 341, 286, 521, 21, 228, 228, 281, 398, 6881, 558, 964, 21, 2]])
@require_torch
def test_integration_torch_conversation_blenderbot_400M(self):
tokenizer = AutoTokenizer.from_pretrained('facebook/blenderbot-400M-distill')
model = AutoModelForSeq2SeqLM.from_pretrained('facebook/blenderbot-400M-distill')
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation_1 = Conversation('hello')
result = conversation_agent(conversation_1)
self.assertEqual(result.generated_responses[0], ' Hello! How are you doing today? I just got back from a walk with my dog.')
conversation_1 = Conversation('Lasagne hello')
result = conversation_agent(conversation_1, encoder_no_repeat_ngram_size=3)
self.assertEqual(result.generated_responses[0], " Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie.")
conversation_1 = Conversation('Lasagne hello Lasagne is my favorite Italian dish. Do you like lasagne? I like lasagne.')
result = conversation_agent(conversation_1, encoder_no_repeat_ngram_size=3)
self.assertEqual(result.generated_responses[0], ' Me too. I like how it can be topped with vegetables, meats, and condiments.')
@require_torch
def test_integration_torch_conversation_encoder_decoder(self):
tokenizer = AutoTokenizer.from_pretrained('facebook/blenderbot_small-90M')
model = AutoModelForSeq2SeqLM.from_pretrained('facebook/blenderbot_small-90M')
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=DEFAULT_DEVICE_NUM)
conversation_1 = Conversation('My name is Sarah and I live in London')
conversation_2 = Conversation('Going to the movies tonight, What movie would you recommend? ')
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 1)
self.assertEqual(len(result[1].past_user_inputs), 1)
self.assertEqual(len(result[0].generated_responses), 1)
self.assertEqual(len(result[1].generated_responses), 1)
self.assertEqual(result[0].past_user_inputs[0], 'My name is Sarah and I live in London')
self.assertEqual(result[0].generated_responses[0], 'hi sarah, i live in london as well. do you have any plans for the weekend?')
self.assertEqual(result[1].past_user_inputs[0], 'Going to the movies tonight, What movie would you recommend? ')
self.assertEqual(result[1].generated_responses[0], "i don't know... i'm not really sure. what movie are you going to see?")
conversation_1.add_user_input('Not yet, what about you?')
conversation_2.add_user_input("What's your name?")
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 2)
self.assertEqual(len(result[1].past_user_inputs), 2)
self.assertEqual(len(result[0].generated_responses), 2)
self.assertEqual(len(result[1].generated_responses), 2)
self.assertEqual(result[0].past_user_inputs[1], 'Not yet, what about you?')
self.assertEqual(result[0].generated_responses[1], "i don't have any plans yet. i'm not sure what to do yet.")
self.assertEqual(result[1].past_user_inputs[1], "What's your name?")
self.assertEqual(result[1].generated_responses[1], "i don't have a name, but i'm going to see a horror movie.")
@require_torch
def test_from_pipeline_conversation(self):
model_id = 'facebook/blenderbot_small-90M'
conversation_agent_from_model_id = pipeline('conversational', model=model_id, tokenizer=model_id)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id)
tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_id)
conversation_agent_from_model = pipeline('conversational', model=model, tokenizer=tokenizer)
conversation = Conversation('My name is Sarah and I live in London')
conversation_copy = Conversation('My name is Sarah and I live in London')
result_model_id = conversation_agent_from_model_id([conversation])
result_model = conversation_agent_from_model([conversation_copy])
self.assertEqual(result_model_id.generated_responses[0], 'hi sarah, i live in london as well. do you have any plans for the weekend?')
self.assertEqual(result_model_id.generated_responses[0], result_model.generated_responses[0]) |
@DATASETS.register_module()
class DavisDataset(RawframeDataset):
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0], [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128], [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0], [0, 64, 128], [128, 64, 128]]
def __init__(self, ann_file, pipeline, data_prefix=None, anno_prefix=None, test_mode=False, split='val', data_root='data/davis2017', task='semi-supervised'):
assert (split in ['train', 'val'])
assert (task in ['semi-supervised'])
self.split = split
self.data_root = data_root
self.task = task
self.anno_prefix = anno_prefix
super().__init__(ann_file, pipeline, data_prefix, test_mode, filename_tmpl='{:05}.jpg', with_offset=False, multi_class=False, num_classes=None, start_index=0, modality='RGB')
def prepare_test_frames(self, idx):
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
ann_frame_dir = results['frame_dir'].replace(self.data_prefix, self.anno_prefix)
results['seg_map'] = osp.join(ann_frame_dir, self.filename_tmpl.format(0).replace('jpg', 'png'))
return self.pipeline(results)
def davis_evaluate(self, results, output_dir, logger=None):
dataset_eval = DAVISEvaluation(davis_root=self.data_root, task=self.task, gt_set=self.split)
if isinstance(results, str):
metrics_res = dataset_eval.evaluate(results)
else:
assert (len(results) == len(self))
for vid_idx in range(len(self)):
assert ((len(results[vid_idx]) == self.video_infos[vid_idx]['total_frames']) or isinstance(results[vid_idx], str))
if (output_dir is None):
tmp_dir = tempfile.TemporaryDirectory()
output_dir = tmp_dir.name
else:
tmp_dir = None
mmcv.mkdir_or_exist(output_dir)
if terminal_is_available():
prog_bar = mmcv.ProgressBar(len(self))
for vid_idx in range(len(results)):
cur_results = results[vid_idx]
if isinstance(cur_results, str):
file_path = cur_results
cur_results = np.load(file_path)
os.remove(file_path)
for img_idx in range(self.video_infos[vid_idx]['total_frames']):
result = cur_results[img_idx].astype(np.uint8)
img = Image.fromarray(result)
img.putpalette(np.asarray(self.PALETTE, dtype=np.uint8).ravel())
frame_dir = self.video_infos[vid_idx]['frame_dir']
save_path = osp.join(output_dir, osp.relpath(frame_dir, self.data_prefix), self.filename_tmpl.format(img_idx).replace('jpg', 'png'))
mmcv.mkdir_or_exist(osp.dirname(save_path))
img.save(save_path)
if terminal_is_available():
prog_bar.update()
metrics_res = dataset_eval.evaluate(output_dir)
if (tmp_dir is not None):
tmp_dir.cleanup()
(J, F) = (metrics_res['J'], metrics_res['F'])
g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
final_mean = ((np.mean(J['M']) + np.mean(F['M'])) / 2.0)
g_res = np.array([final_mean, np.mean(J['M']), np.mean(J['R']), np.mean(J['D']), np.mean(F['M']), np.mean(F['R']), np.mean(F['D'])])
g_res = np.reshape(g_res, [1, len(g_res)])
print_log(f'\nGlobal results for {self.split}', logger=logger)
table_g = pd.DataFrame(data=g_res, columns=g_measures)
print_log(('\n' + table_g.to_string(index=False)), logger=logger)
seq_names = list(J['M_per_object'].keys())
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures)
print_log(f'\nPer sequence results for {self.split}', logger=logger)
print_log(('\n' + table_seq.to_string(index=False)), logger=logger)
eval_results = table_g.to_dict('records')[0]
return eval_results
def evaluate(self, results, metrics='davis', output_dir=None, logger=None):
metrics = (metrics if isinstance(metrics, (list, tuple)) else [metrics])
allowed_metrics = ['davis']
for metric in metrics:
if (metric not in allowed_metrics):
raise KeyError(f'metric {metric} is not supported')
eval_results = dict()
if (mmcv.is_seq_of(results, np.ndarray) and (results[0].ndim == 4)):
num_feats = results[0].shape[0]
for feat_idx in range(num_feats):
cur_results = [result[feat_idx] for result in results]
eval_results.update(add_prefix(self.davis_evaluate(cur_results, output_dir, logger), prefix=f'feat_{feat_idx}'))
elif mmcv.is_seq_of(results, list):
num_feats = len(results[0])
for feat_idx in range(num_feats):
cur_results = [result[feat_idx] for result in results]
eval_results.update(add_prefix(self.davis_evaluate(cur_results, output_dir, logger), prefix=f'feat_{feat_idx}'))
else:
eval_results.update(self.davis_evaluate(results, output_dir, logger))
copypaste = []
for (k, v) in eval_results.items():
if ('J&F' in k):
copypaste.append(f'{(float(v) * 100):.2f}')
print_log(f"Results copypaste {','.join(copypaste)}", logger=logger)
return eval_results |
def module_to_test_file(module_fname):
splits = module_fname.split(os.path.sep)
short_name = os.path.sep.join(splits[2:])
if (short_name in SPECIAL_MODULE_TO_TEST_MAP):
test_file = SPECIAL_MODULE_TO_TEST_MAP[short_name]
if isinstance(test_file, str):
return f'tests/{test_file}'
return [f'tests/{f}' for f in test_file]
module_name = splits[(- 1)]
if module_name.endswith('_fast.py'):
module_name = module_name.replace('_fast.py', '.py')
if ((len(splits) >= 2) and (splits[(- 2)] == 'pipelines')):
default_test_file = f'tests/pipelines/test_pipelines_{module_name}'
elif ((len(splits) >= 2) and (splits[(- 2)] == 'benchmark')):
return ['tests/benchmark/test_benchmark.py', 'tests/benchmark/test_benchmark_tf.py']
elif ((len(splits) >= 2) and (splits[(- 2)] == 'commands')):
return 'tests/utils/test_cli.py'
elif ((len(splits) >= 2) and (splits[(- 2)] == 'onnx')):
return ['tests/onnx/test_onnx.py', 'tests/onnx/test_onnx_v2.py']
elif ((len(splits) > 0) and (splits[0] == 'utils')):
default_test_file = f'tests/utils/test_utils_{module_name}'
elif ((len(splits) > 4) and (splits[2] == 'models')):
default_test_file = f'tests/{splits[3]}/test_{module_name}'
elif ((len(splits) > 2) and splits[2].startswith('generation')):
default_test_file = f'tests/generation/test_{module_name}'
elif ((len(splits) > 2) and splits[2].startswith('trainer')):
default_test_file = f'tests/trainer/test_{module_name}'
else:
default_test_file = f'tests/utils/test_{module_name}'
if os.path.isfile(default_test_file):
return default_test_file
if ('processing' in default_test_file):
test_file = default_test_file.replace('processing', 'processor')
if os.path.isfile(test_file):
return test_file |
def evaluate_subgoals_mc(env, model, dataset, extractor, trial_uid, dataset_idx, args, obj_predictor):
(traj_data, traj_key) = dataset.jsons_and_keys[dataset_idx]
(r_idx, subgoal_idx) = (int(trial_uid.split(':')[1]), int(trial_uid.split(':')[2]))
if (not (traj_data['repeat_idx'] == r_idx)):
print(traj_data)
model.reset()
eval_util.setup_scene(env, traj_data, reward_type='dense')
vocab = {'word': dataset.vocab_in, 'action_low': model.vocab_out}
input_dict = eval_util.load_language(dataset, traj_data, traj_key, model.args, extractor, subgoal_idx)
expert_dict = eval_util.load_expert_actions(dataset, traj_data, traj_key, subgoal_idx)
task_info = eval_util.read_task_data(traj_data, subgoal_idx)
if args.debug:
print(task_info)
(prev_action, subgoal_success, init_failed) = (None, False, False)
for a_expert in expert_dict['actions']:
input_dict['frames'] = eval_util.get_observation(env.last_event, extractor)
(init_failed, prev_action) = eval_util.expert_step(a_expert['action'], expert_dict['masks'], model, input_dict, vocab, prev_action, env, args)
if init_failed:
break
mc_lists = []
(t_agent, t_expert, num_fails, reward) = (0, len(expert_dict['actions']), 0, 0)
if (not init_failed):
env.task.goal_idx = task_info['subgoal_idx']
env.task.finished = (task_info['subgoal_idx'] - 1)
while (t_agent < args.max_steps):
input_dict['frames'] = eval_util.get_observation(env.last_event, extractor)
(episode_end, prev_action, num_fails, _, _, mc_array) = eval_util.agent_step_mc(model, input_dict, vocab, prev_action, env, args, num_fails, obj_predictor)
mc_lists.append((mc_array[0] - mc_array[1]))
reward += env.get_transition_reward()[0]
subgoal_success = (env.get_subgoal_idx() == subgoal_idx)
t_agent += 1
if (episode_end or subgoal_success):
break
metrics = compute_metrics(subgoal_success, subgoal_idx, reward, traj_data, t_agent, env.get_goal_conditions_met())
return (dict(**metrics, **task_info), mc_lists) |
def get_parameter_device(parameter: torch.nn.Module):
try:
return next(parameter.parameters()).device
except StopIteration:
def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[(str, Tensor)]]:
tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = parameter._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device |
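# Hedged usage sketch: the helper returns the device of the first parameter; the
# StopIteration fallback handles modules that carry tensors only as plain attributes
# rather than registered parameters.
import torch
lin = torch.nn.Linear(2, 2)
assert get_parameter_device(lin) == torch.device('cpu') |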
class BatchMolGraph():
def __init__(self, mol_graphs: List[MolGraph]):
self.atom_fdim = get_atom_fdim()
self.bond_fdim = get_bond_fdim()
self.n_atoms = 1
self.n_bonds = 1
self.a_scope = []
self.b_scope = []
f_atoms = [([0] * self.atom_fdim)]
f_bonds = [([0] * self.bond_fdim)]
a2b = [[]]
b2a = [0]
b2revb = [0]
for mol_graph in mol_graphs:
f_atoms.extend(mol_graph.f_atoms)
f_bonds.extend(mol_graph.f_bonds)
for a in range(mol_graph.n_atoms):
a2b.append([(b + self.n_bonds) for b in mol_graph.a2b[a]])
for b in range(mol_graph.n_bonds):
b2a.append((self.n_atoms + mol_graph.b2a[b]))
b2revb.append((self.n_bonds + mol_graph.b2revb[b]))
self.a_scope.append((self.n_atoms, mol_graph.n_atoms))
self.b_scope.append((self.n_bonds, mol_graph.n_bonds))
self.n_atoms += mol_graph.n_atoms
self.n_bonds += mol_graph.n_bonds
self.max_num_bonds = max(1, max((len(in_bonds) for in_bonds in a2b)))
self.f_atoms = torch.tensor(f_atoms)
self.f_bonds = torch.tensor(f_bonds)
self.a2b = torch.tensor([(a2b[a] + ([0] * (self.max_num_bonds - len(a2b[a])))) for a in range(self.n_atoms)])
self.b2a = torch.tensor(b2a)
self.b2revb = torch.tensor(b2revb)
self.b2b = None
self.a2a = None
def get_components(self, atom_messages: bool=False) -> Tuple[(torch.FloatTensor, torch.FloatTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor, List[Tuple[(int, int)]], List[Tuple[(int, int)]], Optional[torch.LongTensor], Optional[torch.LongTensor])]:
f_bonds = self.f_bonds
return (self.f_atoms, f_bonds, self.a2b, self.b2a, self.b2revb, self.a_scope, self.b_scope, self.b2b, self.a2a)
def get_b2b(self) -> torch.LongTensor:
if (self.b2b is None):
b2b = self.a2b[self.b2a]
revmask = (b2b != self.b2revb.unsqueeze(1).repeat(1, b2b.size(1))).long()
self.b2b = (b2b * revmask)
return self.b2b
def get_a2a(self) -> torch.LongTensor:
if (self.a2a is None):
self.a2a = self.b2a[self.a2b]
return self.a2a |
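# Note on the indexing above (added comment, not from the original source): f_atoms and
# f_bonds are prepended with an all-zero row and n_atoms/n_bonds start at 1, so every
# index stored in a2b, b2a and b2revb is 1-based; index 0 acts as a "no atom"/"no bond"
# placeholder that safely gathers the zero feature row during message passing. |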
@pytest.mark.parametrize('prefer_grpc', [False, True])
def test_conditional_payload_update(prefer_grpc):
client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
client.recreate_collection(collection_name=COLLECTION_NAME, vectors_config=VectorParams(size=DIM, distance=Distance.DOT), timeout=TIMEOUT)
uuid1 = str(uuid.uuid4())
uuid2 = str(uuid.uuid4())
uuid3 = str(uuid.uuid4())
uuid4 = str(uuid.uuid4())
client.upsert(collection_name=COLLECTION_NAME, points=[PointStruct(id=uuid1, payload={'a': 1}, vector=np.random.rand(DIM).tolist()), PointStruct(id=uuid2, payload={'a': 2}, vector=np.random.rand(DIM).tolist()), PointStruct(id=uuid3, payload={'b': 1}, vector=np.random.rand(DIM).tolist()), PointStruct(id=uuid4, payload={'b': 2}, vector=np.random.rand(DIM).tolist())], wait=True)
res = client.retrieve(collection_name=COLLECTION_NAME, ids=[uuid1, uuid2, uuid4])
assert (len(res) == 3)
retrieved_ids = [uuid.UUID(point.id) for point in res]
assert (uuid.UUID(uuid1) in retrieved_ids)
assert (uuid.UUID(uuid2) in retrieved_ids)
assert (uuid.UUID(uuid4) in retrieved_ids) |
def crypt(password, salt):
if (len(salt) == 0):
salt = b'AA'
elif (len(salt) == 1):
salt = (salt + b'A')
Eswap0 = _con_salt[(salt[0] & 127)]
Eswap1 = (_con_salt[(salt[1] & 127)] << 4)
ks = _set_key((password + b'\x00\x00\x00\x00\x00\x00\x00\x00')[:8])
(o1, o2) = _body(ks, Eswap0, Eswap1)
# regroup the two 32-bit DES outputs into 24-bit words (masks 0xFF0000 / 0xFF00 / 0xFF)
t1 = ((((o1 << 16) & 16711680) | (o1 & 65280)) | ((o1 >> 16) & 255))
t2 = ((((o1 >> 8) & 16711680) | ((o2 << 8) & 65280)) | ((o2 >> 8) & 255))
t3 = ((o2 & 16711680) | ((o2 >> 16) & 65280))
r = [((t1 >> 18) & 63), ((t1 >> 12) & 63), ((t1 >> 6) & 63), (t1 & 63), ((t2 >> 18) & 63), ((t2 >> 12) & 63), ((t2 >> 6) & 63), (t2 & 63), ((t3 >> 18) & 63), ((t3 >> 12) & 63), ((t3 >> 6) & 63)]
for i in range(len(r)):
r[i] = _cov_2char[r[i]:(r[i] + 1)]
return (salt[:2] + b''.join(r)) |
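# Hedged usage sketch (assuming the DES helpers _con_salt, _set_key, _body and _cov_2char
# are defined elsewhere in this module): classic two-character-salt Unix crypt(3),
# yielding the salt followed by eleven encoded characters.
hashed = crypt(b'password', b'ab')
assert hashed[:2] == b'ab'
assert len(hashed) == 13 |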
class BatchIndexWriterMixin(object):
def __init__(self, uri, db, conn, title):
super(BatchIndexWriterMixin, self).__init__(uri, db, conn, title)
self._property_name_values = []
self._rule_smiles_values = []
self._rule_values = []
self._environment_fingerprint_values = []
self._rule_environment_values = []
self._compound_values = []
self._constant_smiles_values = []
self._rule_environment_pair_values = []
self._compound_property_values = []
self._rule_environment_statistics_values = []
self._check_flush = _trigger(100000)
def execute_many(self, sql, values):
self.conn.executemany(sql, values)
def add_property_name(self, property_name_idx, property_name):
self._property_name_values.append((property_name_idx, property_name))
if next(self._check_flush):
self.flush()
def add_rule_smiles(self, smiles_idx, smiles):
self._rule_smiles_values.append((smiles_idx, smiles, get_num_heavies_from_smiles(smiles)))
if next(self._check_flush):
self.flush()
def add_rule(self, rule_idx, from_smiles_idx, to_smiles_idx):
self._rule_values.append((rule_idx, from_smiles_idx, to_smiles_idx))
if next(self._check_flush):
self.flush()
def add_environment_fingerprint(self, fp_idx, smarts, pseudosmiles, parent_smarts):
if (parent_smarts is None):
parent_smarts = ''
self._environment_fingerprint_values.append((fp_idx, smarts, pseudosmiles, parent_smarts))
if next(self._check_flush):
self.flush()
def add_rule_environment(self, rule_env_idx, rule_idx, env_fp_idx, radius):
self._rule_environment_values.append((rule_env_idx, rule_idx, env_fp_idx, radius, 0))
if next(self._check_flush):
self.flush()
def add_compound(self, compound_idx, compound_id, input_smiles, normalized_smiles, num_normalized_heavies):
self._compound_values.append((compound_idx, compound_id, input_smiles, normalized_smiles, num_normalized_heavies))
if next(self._check_flush):
self.flush()
def add_constant_smiles(self, smiles_idx, constant_smiles):
self._constant_smiles_values.append((smiles_idx, constant_smiles))
if next(self._check_flush):
self.flush()
def add_rule_environment_pair(self, pair_idx, env_idx, compound1_idx, compound2_idx, constant_idx):
self._rule_environment_pair_values.append((pair_idx, env_idx, compound1_idx, compound2_idx, constant_idx))
if next(self._check_flush):
self.flush()
def add_compound_property(self, compound_idx, property_name_idx, value):
self._compound_property_values.append((compound_idx, property_name_idx, value))
if next(self._check_flush):
self.flush()
def add_rule_environment_statistics(self, rule_env_idx, property_name_idx, values):
self._rule_environment_statistics_values.append(((rule_env_idx, property_name_idx) + tuple(values)))
if next(self._check_flush):
self.flush()
def flush(self):
for (values, sql) in ((self._property_name_values, self.ADD_PROPERTY_NAME_SQL), (self._rule_smiles_values, self.ADD_RULE_SMILES_SQL), (self._rule_values, self.ADD_RULE_SQL), (self._environment_fingerprint_values, self.ADD_ENVIRONMENT_FINGERPRINT_SQL), (self._rule_environment_values, self.ADD_RULE_ENVIRONMENT_SQL), (self._compound_values, self.ADD_COMPOUND_SQL), (self._constant_smiles_values, self.ADD_CONSTANT_SMILES_SQL), (self._rule_environment_pair_values, self.ADD_RULE_ENVIRONMENT_PAIR_SQL), (self._compound_property_values, self.ADD_COMPOUND_PROPERTY_SQL), (self._rule_environment_statistics_values, self.ADD_RULE_ENVIRONMENT_STATISTICS_SQL)):
if values:
self.execute_many(sql, values)
del values[:] |
class TornadoServer(ServerAdapter):
def run(self, handler):
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start() |
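# Hedged usage note (assuming this is a bottle-style ServerAdapter): the adapter is
# normally selected when starting the app rather than instantiated directly, e.g.
# run(app, server=TornadoServer, host='localhost', port=8080). |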
def run_python(*args, python=sys.executable, **kwargs):
if ((not isinstance(python, str)) and (python is not None)):
try:
python = python.sys.executable
except AttributeError:
raise TypeError(f'expected python str, got {python!r}')
return run_cmd([python, *args], **kwargs) |
class DataCollection():
TASKS = ['pour', 'scoop', 'stab', 'cut', 'lift', 'hammer', 'handover']
STATES = {'cup': ['hot', 'cold', 'empty'], 'bowl': ['filled', 'empty'], 'spatula': ['has stuff', 'empty'], 'bottle': ['lid on', 'lid off'], 'pan': ['hot', 'empty']}
TASK_DESCRIPTIONS = {'pour': 'Grasp the object to pour the liquid out', 'scoop': 'Grasp the object to scoop something', 'stab': 'Grasp the object to stab', 'cut': 'Grasp the object to cut', 'lift': 'Grasp the object to use it for lifting something up. For example, use the spatula to lift a fried egg up.', 'hammer': 'Grasp the object to hammer a nail', 'handover': 'Grasp the object to hand it over to someone'}
NUM_SAMPLE_GRASPS = 20
def __init__(self, collect_objects=True):
if collect_objects:
self.semantic_objects_topic = rospy.get_param('~semantic_objects_with_grasps_topic')
self.data_dir = rospy.get_param('~data_dir_path', os.path.join(rospkg.RosPack().get_path('rail_semantic_grasping'), 'data'))
if (not os.path.exists(self.data_dir)):
os.mkdir(self.data_dir)
rospy.loginfo('Data folder is set up at {}'.format(self.data_dir))
else:
rospy.loginfo('Data folder is at {}'.format(self.data_dir))
self.unlabeled_data_dir = os.path.join(self.data_dir, 'unlabeled')
if (not os.path.exists(self.unlabeled_data_dir)):
os.mkdir(self.unlabeled_data_dir)
self.labeled_data_dir = os.path.join(self.data_dir, 'labeled')
if (not os.path.exists(self.labeled_data_dir)):
os.mkdir(self.labeled_data_dir)
if collect_objects:
time = datetime.now()
date = time.strftime('%Y_%m_%d_%H_%M')
self.session_dir = os.path.join(self.unlabeled_data_dir, date)
os.mkdir(self.session_dir)
rospy.loginfo('Start data collection session---data will be saved to {}'.format(self.session_dir))
self.object_counter = 0
if collect_objects:
self.semantic_objects_sub = rospy.Subscriber(self.semantic_objects_topic, SemanticObjectList, self.semantic_objects_callback)
rospy.loginfo('Listen to semantic objects with grasp from {}'.format(self.semantic_objects_topic))
else:
self.markers_pub = rospy.Publisher('~data_collection/markers', MarkerArray, queue_size=10, latch=True)
self.grasp_pub = rospy.Publisher('~data_collection/grasp', PoseStamped, queue_size=10, latch=True)
self.marker_pub = rospy.Publisher('~data_collection/marker', Marker, queue_size=10, latch=True)
self.color_image_pub = rospy.Publisher('~data_collection/color_image', Image, queue_size=10, latch=True)
self.pc_pub = rospy.Publisher('~data_collection/point_cloud', PointCloud2, queue_size=10, latch=True)
def semantic_objects_callback(self, semantic_objects):
object_file_path = os.path.join(self.session_dir, (str(self.object_counter) + '.pkl'))
with open(object_file_path, 'wb') as fh:
pickle.dump(semantic_objects, fh)
rospy.loginfo('Saved object No.{}'.format(self.object_counter))
self.object_counter += 1
def collect_grasps(self):
session_dirs = glob.glob(os.path.join(self.unlabeled_data_dir, '*'))
for session_dir in session_dirs:
object_files = glob.glob(os.path.join(session_dir, '*.pkl'))
labeled_session_dir = session_dir.replace('unlabeled', 'labeled')
if (not os.path.exists(labeled_session_dir)):
os.mkdir(labeled_session_dir)
for object_file in object_files:
with open(object_file, 'rb') as fh:
semantic_objects = pickle.load(fh)
key = raw_input('Proceed with semantic objects: {}? y/n '.format(object_file))
if (key != 'y'):
continue
markers = MarkerArray()
marker = Marker()
if (not semantic_objects.objects):
continue
semantic_object = semantic_objects.objects[0]
object_class = semantic_object.name
marker = semantic_object.marker
for semantic_part in semantic_object.parts:
markers.markers.append(semantic_part.marker)
markers.markers.append(semantic_part.text_marker)
self.markers_pub.publish(markers)
self.color_image_pub.publish(semantic_object.color_image)
self.pc_pub.publish(semantic_object.point_cloud)
self.marker_pub.publish(marker)
if (not semantic_object.grasps):
continue
rospy.loginfo(('#' * 100))
rospy.loginfo('Current object has {} grasps'.format(len(semantic_object.grasps)))
sampled_grasps = np.random.choice(semantic_object.grasps, DataCollection.NUM_SAMPLE_GRASPS, replace=False).tolist()
rospy.loginfo('Sample {} grasps for labeling'.format(len(sampled_grasps)))
labeled_grasps = []
skip_object = False
for task in DataCollection.TASKS:
for state in DataCollection.STATES[object_class]:
rospy.loginfo(('*' * 100))
rospy.loginfo('')
rospy.loginfo('For task: {}'.format(task))
rospy.loginfo('')
rospy.loginfo('For state: {}'.format(state))
rospy.loginfo('')
rospy.loginfo(('*' * 100))
grasps_for_task = copy.deepcopy(sampled_grasps)
context = '_'.join([task, state])
skip_context = 0
for (gi, semantic_grasp) in enumerate(grasps_for_task):
semantic_grasp.task = context
pose_stamped = PoseStamped()
pose_stamped.header.frame_id = semantic_objects.header.frame_id
pose_stamped.pose = semantic_grasp.grasp_pose
self.grasp_pub.publish(pose_stamped)
rospy.loginfo('Grasp No.{}/{} is on the part with affordance {} and material {}'.format((gi + 1), DataCollection.NUM_SAMPLE_GRASPS, semantic_grasp.grasp_part_affordance, semantic_grasp.grasp_part_material))
if skip_context:
if (skip_context == 2):
semantic_grasp.score = 0
elif (skip_context == 3):
semantic_grasp.score = (- 1)
continue
valid = False
while (not valid):
key = raw_input('Is this grasp semantically correct? absolutely(press 1) / ok(press 2) / definitely not(press 3) ')
if (key == '1'):
semantic_grasp.score = 1
valid = True
elif (key == '3'):
semantic_grasp.score = (- 1)
valid = True
elif ((key == '2') or (key == '')):
semantic_grasp.score = 0
valid = True
elif (key == 'q'):
skip_object = True
break
elif (key == '22'):
rospy.loginfo('All grasps for this context will be labeled as semantically ok!')
skip_context = 2
semantic_grasp.score = 0
valid = True
elif (key == '33'):
rospy.loginfo('All grasps for this context will be labeled as semantically incorrect!')
skip_context = 3
semantic_grasp.score = (- 1)
valid = True
else:
rospy.loginfo('Not a valid input, try again')
if skip_object:
break
labeled_grasps += grasps_for_task
if skip_object:
break
if skip_object:
break
if skip_object:
continue
semantic_object.labeled_grasps = labeled_grasps
rospy.loginfo('Saving labeled grasps...\n')
new_object_file = object_file.replace('unlabeled', 'labeled')
with open(new_object_file, 'wb') as fh:
pickle.dump(semantic_objects, fh)
for i in range(len(markers.markers)):
markers.markers[i].action = 2
marker.action = 2
self.markers_pub.publish(markers)
self.marker_pub.publish(marker)
clear_pc = PointCloud2()
clear_pc.header = semantic_object.point_cloud.header
self.pc_pub.publish(clear_pc)
valid = False
while (not valid):
key = raw_input("Next object? type '!' to continue")
if (key == '!'):
valid = True
else:
rospy.loginfo('Not a valid input, try again')
if skip_object:
break
rospy.loginfo('All objects have finished labeling. Exiting!')
exit() |
def test_qdata_round_trip(tmpdir):
with tmpdir.as_cwd():
mol = Ligand.from_file(file_name=get_data('biphenyl.sdf'))
td_ref = TorsionDriveData.from_qdata(dihedral=(6, 10, 11, 8), qdata_file=get_data('biphenyl_qdata.txt'))
export_torsiondrive_data(molecule=mol, tdrive_data=td_ref)
td_new = TorsionDriveData.from_qdata(dihedral=(6, 10, 11, 8), qdata_file='qdata.txt')
assert (td_ref.dihedral == td_new.dihedral)
for (angle, ref_result) in td_ref.reference_data.items():
new_result = td_new.reference_data[angle]
assert (ref_result.angle == new_result.angle)
assert (ref_result.energy == pytest.approx(new_result.energy))
assert np.allclose(ref_result.geometry.tolist(), new_result.geometry.tolist()) |
class SSHCertificate():
def __init__(self, _nonce: memoryview, _public_key: SSHPublicKeyTypes, _serial: int, _cctype: int, _key_id: memoryview, _valid_principals: list[bytes], _valid_after: int, _valid_before: int, _critical_options: dict[(bytes, bytes)], _extensions: dict[(bytes, bytes)], _sig_type: memoryview, _sig_key: memoryview, _inner_sig_type: memoryview, _signature: memoryview, _tbs_cert_body: memoryview, _cert_key_type: bytes, _cert_body: memoryview):
self._nonce = _nonce
self._public_key = _public_key
self._serial = _serial
try:
self._type = SSHCertificateType(_cctype)
except ValueError:
raise ValueError('Invalid certificate type')
self._key_id = _key_id
self._valid_principals = _valid_principals
self._valid_after = _valid_after
self._valid_before = _valid_before
self._critical_options = _critical_options
self._extensions = _extensions
self._sig_type = _sig_type
self._sig_key = _sig_key
self._inner_sig_type = _inner_sig_type
self._signature = _signature
self._cert_key_type = _cert_key_type
self._cert_body = _cert_body
self._tbs_cert_body = _tbs_cert_body
def nonce(self) -> bytes:
return bytes(self._nonce)
def public_key(self) -> SSHCertPublicKeyTypes:
return typing.cast(SSHCertPublicKeyTypes, self._public_key)
def serial(self) -> int:
return self._serial
def type(self) -> SSHCertificateType:
return self._type
def key_id(self) -> bytes:
return bytes(self._key_id)
def valid_principals(self) -> list[bytes]:
return self._valid_principals
def valid_before(self) -> int:
return self._valid_before
def valid_after(self) -> int:
return self._valid_after
def critical_options(self) -> dict[(bytes, bytes)]:
return self._critical_options
def extensions(self) -> dict[(bytes, bytes)]:
return self._extensions
def signature_key(self) -> SSHCertPublicKeyTypes:
sigformat = _lookup_kformat(self._sig_type)
(signature_key, sigkey_rest) = sigformat.load_public(self._sig_key)
_check_empty(sigkey_rest)
return signature_key
def public_bytes(self) -> bytes:
return ((bytes(self._cert_key_type) + b' ') + binascii.b2a_base64(bytes(self._cert_body), newline=False))
def verify_cert_signature(self) -> None:
signature_key = self.signature_key()
if isinstance(signature_key, ed25519.Ed25519PublicKey):
signature_key.verify(bytes(self._signature), bytes(self._tbs_cert_body))
elif isinstance(signature_key, ec.EllipticCurvePublicKey):
(r, data) = _get_mpint(self._signature)
(s, data) = _get_mpint(data)
_check_empty(data)
computed_sig = asym_utils.encode_dss_signature(r, s)
hash_alg = _get_ec_hash_alg(signature_key.curve)
signature_key.verify(computed_sig, bytes(self._tbs_cert_body), ec.ECDSA(hash_alg))
else:
assert isinstance(signature_key, rsa.RSAPublicKey)
if (self._inner_sig_type == _SSH_RSA):
hash_alg = hashes.SHA1()
elif (self._inner_sig_type == _SSH_RSA_SHA256):
hash_alg = hashes.SHA256()
else:
assert (self._inner_sig_type == _SSH_RSA_SHA512)
hash_alg = hashes.SHA512()
signature_key.verify(bytes(self._signature), bytes(self._tbs_cert_body), padding.PKCS1v15(), hash_alg) |
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if (addresses and (len(addresses) > 0)):
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = (addresses.index(str(address_e.text())) + 1)
i = (i % len(addresses))
address_e.setText(addresses[i])
except ValueError:
if (addresses and (len(addresses) > 0)):
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return (hbox, address_e) |
def test_build_sdist_with_bad_path_dep_succeeds(caplog: LogCaptureFixture) -> None:
with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, 'with_bad_path_dep')):
api.build_sdist(tmp_dir)
assert (len(caplog.records) == 1)
record = caplog.records[0]
assert (record.levelname == 'WARNING')
assert ('does not exist' in record.message) |
class DAF3D(nn.Module):
def __init__(self):
super(DAF3D, self).__init__()
self.backbone = BackBone3D()
self.down4 = nn.Sequential(nn.Conv3d(2048, 128, kernel_size=1), nn.GroupNorm(32, 128), nn.PReLU())
self.down3 = nn.Sequential(nn.Conv3d(1024, 128, kernel_size=1), nn.GroupNorm(32, 128), nn.PReLU())
self.down2 = nn.Sequential(nn.Conv3d(512, 128, kernel_size=1), nn.GroupNorm(32, 128), nn.PReLU())
self.down1 = nn.Sequential(nn.Conv3d(256, 128, kernel_size=1), nn.GroupNorm(32, 128), nn.PReLU())
self.fuse1 = nn.Sequential(nn.Conv3d(512, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU())
self.attention4 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.Sigmoid())
self.attention3 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.Sigmoid())
self.attention2 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.Sigmoid())
self.attention1 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.Sigmoid())
self.refine4 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU())
self.refine3 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU())
self.refine2 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU())
self.refine1 = nn.Sequential(nn.Conv3d(192, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU(), nn.Conv3d(64, 64, kernel_size=3, padding=1), nn.GroupNorm(32, 64), nn.PReLU())
self.refine = nn.Sequential(nn.Conv3d(256, 64, kernel_size=1), nn.GroupNorm(32, 64), nn.PReLU())
rates = (1, 6, 12, 18)
self.aspp1 = ASPP_module(64, 64, rate=rates[0])
self.aspp2 = ASPP_module(64, 64, rate=rates[1])
self.aspp3 = ASPP_module(64, 64, rate=rates[2])
self.aspp4 = ASPP_module(64, 64, rate=rates[3])
self.aspp_conv = nn.Conv3d(256, 64, 1)
self.aspp_gn = nn.GroupNorm(32, 64)
self.predict1_4 = nn.Conv3d(128, 1, kernel_size=1)
self.predict1_3 = nn.Conv3d(128, 1, kernel_size=1)
self.predict1_2 = nn.Conv3d(128, 1, kernel_size=1)
self.predict1_1 = nn.Conv3d(128, 1, kernel_size=1)
self.predict2_4 = nn.Conv3d(64, 1, kernel_size=1)
self.predict2_3 = nn.Conv3d(64, 1, kernel_size=1)
self.predict2_2 = nn.Conv3d(64, 1, kernel_size=1)
self.predict2_1 = nn.Conv3d(64, 1, kernel_size=1)
self.predict = nn.Conv3d(64, 1, kernel_size=1)
def forward(self, x):
layer0 = self.backbone.layer0(x)
layer1 = self.backbone.layer1(layer0)
layer2 = self.backbone.layer2(layer1)
layer3 = self.backbone.layer3(layer2)
layer4 = self.backbone.layer4(layer3)
down4 = self.down4(layer4)
down3 = torch.add(F.upsample(down4, size=layer3.size()[2:], mode='trilinear'), self.down3(layer3))
down2 = torch.add(F.upsample(down3, size=layer2.size()[2:], mode='trilinear'), self.down2(layer2))
down1 = torch.add(F.upsample(down2, size=layer1.size()[2:], mode='trilinear'), self.down1(layer1))
down4 = F.upsample(down4, size=layer1.size()[2:], mode='trilinear')
down3 = F.upsample(down3, size=layer1.size()[2:], mode='trilinear')
down2 = F.upsample(down2, size=layer1.size()[2:], mode='trilinear')
predict1_4 = self.predict1_4(down4)
predict1_3 = self.predict1_3(down3)
predict1_2 = self.predict1_2(down2)
predict1_1 = self.predict1_1(down1)
fuse1 = self.fuse1(torch.cat((down4, down3, down2, down1), 1))
attention4 = self.attention4(torch.cat((down4, fuse1), 1))
attention3 = self.attention3(torch.cat((down3, fuse1), 1))
attention2 = self.attention2(torch.cat((down2, fuse1), 1))
attention1 = self.attention1(torch.cat((down1, fuse1), 1))
refine4 = self.refine4(torch.cat((down4, (attention4 * fuse1)), 1))
refine3 = self.refine3(torch.cat((down3, (attention3 * fuse1)), 1))
refine2 = self.refine2(torch.cat((down2, (attention2 * fuse1)), 1))
refine1 = self.refine1(torch.cat((down1, (attention1 * fuse1)), 1))
refine = self.refine(torch.cat((refine1, refine2, refine3, refine4), 1))
predict2_4 = self.predict2_4(refine4)
predict2_3 = self.predict2_3(refine3)
predict2_2 = self.predict2_2(refine2)
predict2_1 = self.predict2_1(refine1)
aspp1 = self.aspp1(refine)
aspp2 = self.aspp2(refine)
aspp3 = self.aspp3(refine)
aspp4 = self.aspp4(refine)
aspp = torch.cat((aspp1, aspp2, aspp3, aspp4), dim=1)
aspp = self.aspp_gn(self.aspp_conv(aspp))
predict = self.predict(aspp)
predict1_1 = F.upsample(predict1_1, size=x.size()[2:], mode='trilinear')
predict1_2 = F.upsample(predict1_2, size=x.size()[2:], mode='trilinear')
predict1_3 = F.upsample(predict1_3, size=x.size()[2:], mode='trilinear')
predict1_4 = F.upsample(predict1_4, size=x.size()[2:], mode='trilinear')
predict2_1 = F.upsample(predict2_1, size=x.size()[2:], mode='trilinear')
predict2_2 = F.upsample(predict2_2, size=x.size()[2:], mode='trilinear')
predict2_3 = F.upsample(predict2_3, size=x.size()[2:], mode='trilinear')
predict2_4 = F.upsample(predict2_4, size=x.size()[2:], mode='trilinear')
predict = F.upsample(predict, size=x.size()[2:], mode='trilinear')
if self.training:
return (predict1_1, predict1_2, predict1_3, predict1_4, predict2_1, predict2_2, predict2_3, predict2_4, predict)
else:
return predict |
class SequenceMapperSeq(SequenceMapper):
def __init__(self, *layers: SequenceMapper):
self.layers = layers
def apply(self, is_train, x, mask=None):
for (i, layer) in enumerate(self.layers):
with tf.variable_scope(('layer_' + str(i))):
x = layer.apply(is_train, x, mask)
return x
def get_params(self):
return dict(layers=[x.get_config() for x in self.layers]) |
def test_shorthand_property_storage():
model = Model()
node = Storage(model, 'node')
for attr in ('min_volume', 'max_volume', 'cost', 'level'):
setattr(node, attr, 123)
if (attr == 'conversion_factor'):
with pytest.raises(ValueError):
setattr(node, attr, Parameter(model))
else:
setattr(node, attr, Parameter(model))
with pytest.raises(TypeError):
setattr(node, attr, '123')
setattr(node, attr, None) |
def evaluate(loader, model):
model.eval()
correct = 0
total = 0
for (images, _, labels, _) in loader:
images = images.cuda()
labels = labels.cuda()
output1 = model(images)
(_, pred) = torch.max(output1.data, 1)
total += images.size(0)
correct += (pred == labels).sum().item()
acc = ((100 * float(correct)) / float(total))
return acc |