code stringlengths 281 23.7M |
|---|
class TestRSAVerification:
    """RSA signature verification tests covering PKCS1v1.5 and PSS padding.

    NOTE(review): the decorator lines in the original were truncated to bare
    ``.supported(...)`` / ``.skip_fips(...)`` attribute accesses, which is a
    syntax error.  They are restored here as ``@pytest.mark.supported`` and
    ``@pytest.mark.skip_fips`` marks, the conventional form for backend-gated
    tests — confirm against the project's marker definitions.
    """

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PKCS1v15()),
        skip_message='Does not support PKCS1v1.5.',
    )
    @pytest.mark.supported(
        only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
        skip_message='Does not support SHA1 signature.',
    )
    def test_pkcs1v15_verification(self, backend, subtests):
        # Verify every PKCS#1 v1.5 vector, then check that
        # recover_data_from_signature round-trips the message digest.
        vectors = _flatten_pkcs1_examples(load_vectors_from_file(os.path.join('asymmetric', 'RSA', 'pkcs1v15sign-vectors.txt'), load_pkcs1_vectors))
        for private, public, example in vectors:
            with subtests.test():
                public_key = rsa.RSAPublicNumbers(e=public['public_exponent'], n=public['modulus']).public_key(backend)
                signature = binascii.unhexlify(example['signature'])
                message = binascii.unhexlify(example['message'])
                public_key.verify(signature, message, padding.PKCS1v15(), hashes.SHA1())
                # With a hash algorithm supplied, recovery yields the digest.
                digest = hashes.Hash(hashes.SHA1())
                digest.update(message)
                msg_digest = digest.finalize()
                rec_msg_digest = public_key.recover_data_from_signature(signature, padding.PKCS1v15(), hashes.SHA1())
                assert msg_digest == rec_msg_digest
                # With no hash algorithm, the full encoded structure is
                # recovered; it must end with the raw digest bytes.
                rec_sig_data = public_key.recover_data_from_signature(signature, padding.PKCS1v15(), None)
                assert len(rec_sig_data) > len(msg_digest)
                assert msg_digest == rec_sig_data[-len(msg_digest):]

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PKCS1v15()),
        skip_message='Does not support PKCS1v1.5.',
    )
    def test_invalid_pkcs1v15_signature_wrong_data(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # A valid signature over different data must not verify.
        private_key = rsa_key_2048
        public_key = private_key.public_key()
        signature = private_key.sign(b'sign me', padding.PKCS1v15(), hashes.SHA256())
        with pytest.raises(InvalidSignature):
            public_key.verify(signature, b'incorrect data', padding.PKCS1v15(), hashes.SHA256())

    def test_invalid_pkcs1v15_signature_recover_wrong_hash_alg(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # Recovering with a hash algorithm other than the one used to sign
        # must raise InvalidSignature.
        private_key = rsa_key_2048
        public_key = private_key.public_key()
        signature = private_key.sign(b'sign me', padding.PKCS1v15(), hashes.SHA256())
        with pytest.raises(InvalidSignature):
            public_key.recover_data_from_signature(signature, padding.PKCS1v15(), hashes.SHA512())

    def test_invalid_signature_sequence_removed(self, backend):
        # Signature crafted with the ASN.1 SEQUENCE stripped from the
        # DigestInfo; verification must fail.
        key_der = binascii.unhexlify(b'd06092a864886f70dfaa2b451a07d0aa5f96ea8a5b462ebef717094fa1fee82224e637f9746d3f7cafd31878d80325b6ef5a1700f65903b469429e89d6eac8845097b5ab393189db92512ed8a7711a1253facd20f79c15e8247f3d3e42e46e48c98e254a2fe9765313a03eff8f17e1a029397a1fa26a8dce26f490edd9814c22da610428e09c7df5c021d0fceca08d945a12be82de4d1ece6b4c03145b5d3495d4ed5411eb878daf05fd7afc3e09ada0f1126422f590975a1969816f48698bcbba1b4d9cae79d460d8f9f85e7975005d9bc22c4e5ac0f7c1a45d12569a62807d3b9a02e5a530e773066f453d1f5b4c2e9cf7820283f742b9d')
        sig = binascii.unhexlify(b'498209f59a0679a1f926eccf3056da2cba553d7ab3064e7c41ad1d739f038249f02f5ad12ee246073d101bc3cdb563e8b6beb7e6c16ad53deb12af5deaf41bb59c6597f3980132b7478fd0b95fd27dfad64a20fd5c25312bbd41a85286cd2a83c8df5efa0779158d01b0747ff165b055eb2880eaad8c5922cf6aa9d7e29b5056db5ded5eb20aeb31b8942e26b15a5188a4934cd7e39cfe379a197f49a204343a493452deebca436ee614f4daf989ef7e69ffa8ccc6a1e81cf0ab33c3e6da6a31bda3bb9a3003d3fd9daf7c4778b43fd46144d945d815f12628ff4')
        public_key = serialization.load_der_public_key(key_der, backend)
        assert isinstance(public_key, rsa.RSAPublicKey)
        with pytest.raises(InvalidSignature):
            public_key.verify(sig, binascii.unhexlify(b''), padding.PKCS1v15(), hashes.SHA256())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PKCS1v15()),
        skip_message='Does not support PKCS1v1.5.',
    )
    def test_invalid_pkcs1v15_signature_wrong_key(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # Verifying with a different key's public half must fail.
        private_key = rsa_key_2048
        private_key2 = RSA_KEY_2048_ALT.private_key(backend, unsafe_skip_rsa_key_validation=True)
        public_key = private_key2.public_key()
        msg = b'sign me'
        signature = private_key.sign(msg, padding.PKCS1v15(), hashes.SHA256())
        with pytest.raises(InvalidSignature):
            public_key.verify(signature, msg, padding.PKCS1v15(), hashes.SHA256())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=20)),
        skip_message='Does not support PSS.',
    )
    @pytest.mark.supported(
        only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
        skip_message='Does not support SHA1 signature.',
    )
    def test_pss_verification(self, subtests, backend):
        # Verify the published RSA-PSS test vectors (fixed 20-byte salt).
        for private, public, example in _flatten_pkcs1_examples(load_vectors_from_file(os.path.join('asymmetric', 'RSA', 'pkcs-1v2-1d2-vec', 'pss-vect.txt'), load_pkcs1_vectors)):
            with subtests.test():
                public_key = rsa.RSAPublicNumbers(e=public['public_exponent'], n=public['modulus']).public_key(backend)
                public_key.verify(binascii.unhexlify(example['signature']), binascii.unhexlify(example['message']), padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=20), hashes.SHA1())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.AUTO)),
        skip_message='Does not support PSS.',
    )
    def test_pss_verify_auto_salt_length(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # AUTO salt length on verify must accept a MAX_LENGTH-salted signature.
        private_key = rsa_key_2048
        signature = private_key.sign(b'some data', padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())
        private_key.public_key().verify(signature, b'some data', padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.AUTO), hashes.SHA256())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH)),
        skip_message='Does not support PSS.',
    )
    @pytest.mark.supported(
        only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
        skip_message='Does not support SHA1 signature.',
    )
    @pytest.mark.skip_fips(reason='Unsupported key size in FIPS mode.')
    def test_invalid_pss_signature_wrong_data(self, backend):
        public_key = rsa.RSAPublicNumbers(n=int(b'dffc2137d5e810cde9e4b4612fbab913b3fa98bdf7982e4fa6ec4d6653ef2b29fb1642b095befcbea6decc178fb4bed243d3c3592c68546af2d3f3', 16), e=65537).public_key(backend)
        signature = binascii.unhexlify(b'0e68c3649df91c5bc3665f96e157efa75b71934aaa514d91e94ca8418d100f456f05288e58525f99666bab052adcffdf7186eb40f583bd38d98c97d3d524808b')
        with pytest.raises(InvalidSignature):
            public_key.verify(signature, b'incorrect data', padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA1())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH)),
        skip_message='Does not support PSS.',
    )
    @pytest.mark.supported(
        only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
        skip_message='Does not support SHA1 signature.',
    )
    @pytest.mark.skip_fips(reason='Unsupported key size in FIPS mode.')
    def test_invalid_pss_signature_wrong_key(self, backend):
        signature = binascii.unhexlify(b'3aba6eb53cc1449d13e5132ebcc0cfd9ade6d7a2494a0503bd0826f8a46c431e0d7be0ca3e453f8b2b009e2733764da7927cc6dbe7a021437a242e')
        public_key = rsa.RSAPublicNumbers(n=int(b'381201f4905d67dfeb3dec131a0fbeaec7a1448c3109189ac685a95441be90866a14c4d2e139cd16db540ec6c7abab13ffff91443fd46a8960cbb7658ded26a5c95c86f6e40384e1c1239c63e541ba221191c4dd303231b42e33c6dbddf5ec9a746f09bf0c25d0f8d27f93ee0ae5c0d723348f4030d3581e13522e1', 16), e=65537).public_key(backend)
        with pytest.raises(InvalidSignature):
            public_key.verify(signature, b'sign me', padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA1())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH)),
        skip_message='Does not support PSS.',
    )
    @pytest.mark.supported(
        only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
        skip_message='Does not support SHA1 signature.',
    )
    @pytest.mark.skip_fips(reason='Unsupported key size in FIPS mode.')
    def test_invalid_pss_signature_data_too_large_for_modulus(self, backend):
        # Signature numerically larger than the modulus must be rejected.
        signature = binascii.unhexlify(b'58750fc3d2f560d1f3e37c8e28bc8da6d3e93f5d58f8becd25b1c931eea30fea54cb17d44b90104a0aacb7fe9ffa2a59cd63de78178d21eb875ccd0b07121b641ed4fe6bcb1cab4f24bdba8a698a8e4e07e6bf2c47a736abe5a912e85cd32f648f3e043b4385e8b612dcce342c5fddf18c524deb56295b95f6dfa759b2896b793628a90f133e74c1ff7d3af43e3f7ee792df2e5b6a19e996aca437b3ae4e3ac91976c336c332a3b1db0d172b19cb40ad3d871296cfffb3c889ce74a179a3e290852c35d59525afe4b39dc907fad2ac462c50a488dca486031a3dc8c4cdbbc53e9f71d64732e1533a5d1249b833ce')
        public_key = RSA_KEY_1024.private_key(unsafe_skip_rsa_key_validation=True).public_key()
        with pytest.raises(InvalidSignature):
            public_key.verify(signature, b'sign me', padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA1())

    @pytest.mark.supported(
        only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
        skip_message='Does not support SHA1 signature.',
    )
    def test_invalid_pss_signature_recover(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # Data recovery is not defined for PSS: None algorithm is a TypeError,
        # and a concrete algorithm is an unsupported-padding error.
        private_key = rsa_key_2048
        public_key = private_key.public_key()
        pss_padding = padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH)
        signature = private_key.sign(b'sign me', pss_padding, hashes.SHA256())
        with pytest.raises(TypeError):
            public_key.recover_data_from_signature(signature, pss_padding, None)
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
            public_key.recover_data_from_signature(signature, pss_padding, hashes.SHA256())

    def test_unsupported_padding(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        private_key = rsa_key_2048
        public_key = private_key.public_key()
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
            public_key.verify(b'sig', b'msg', DummyAsymmetricPadding(), hashes.SHA256())

    def test_padding_incorrect_type(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        private_key = rsa_key_2048
        public_key = private_key.public_key()
        with pytest.raises(TypeError):
            public_key.verify(b'sig', b'msg', 'notpadding', hashes.SHA256())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)),
        skip_message='Does not support PSS.',
    )
    def test_unsupported_pss_mgf(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        private_key = rsa_key_2048
        public_key = private_key.public_key()
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
            public_key.verify(b'sig', b'msg', padding.PSS(mgf=DummyMGF(), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH)),
        skip_message='Does not support PSS.',
    )
    @pytest.mark.supported(
        only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
        skip_message='Does not support SHA512.',
    )
    @pytest.mark.skip_fips(reason='Unsupported key size in FIPS mode.')
    def test_pss_verify_digest_too_large_for_key_size(self, rsa_key_512: rsa.RSAPrivateKey, backend):
        # A SHA512 digest cannot fit the PSS encoding for a 512-bit key.
        private_key = rsa_key_512
        signature = binascii.unhexlify(b'8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8534c050ef6b19b1bdc6eb4da422ea6f5b5cc16135b11eb6439b646bd')
        public_key = private_key.public_key()
        with pytest.raises(ValueError):
            public_key.verify(signature, b"msg doesn't matter", padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA512())

    @pytest.mark.supported(
        only_if=lambda backend: backend.rsa_padding_supported(padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=padding.PSS.MAX_LENGTH)),
        skip_message='Does not support PSS.',
    )
    @pytest.mark.supported(
        only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
        skip_message='Does not support SHA1 signature.',
    )
    @pytest.mark.skip_fips(reason='Unsupported key size in FIPS mode.')
    def test_pss_verify_salt_length_too_long(self, backend):
        signature = binascii.unhexlify(b'8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8534c050ef6b19b1bdc6eb4da422ea6f5b5cc16135b11eb6439b646bd')
        public_key = rsa.RSAPublicNumbers(n=int(b'd309eb747d7f9eb9cd3340f54fe42bb3f84a36933b0839c11b0c8b7f67e11fe31159c49c784d4bc41c42a78ce0f0b40a3ca8ffb91', 16), e=65537).public_key(backend)
        with pytest.raises(InvalidSignature):
            public_key.verify(signature, b'sign me', padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=1000000), hashes.SHA1())

    def test_verify(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # Round-trip: sign then verify with the matching public key.
        private_key = rsa_key_2048
        message = b'one little message'
        pkcs = padding.PKCS1v15()
        algorithm = hashes.SHA256()
        signature = private_key.sign(message, pkcs, algorithm)
        public_key = private_key.public_key()
        public_key.verify(signature, message, pkcs, algorithm)

    def test_prehashed_verify(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # A signature over the message verifies against the precomputed digest
        # when wrapped in Prehashed.
        private_key = rsa_key_2048
        message = b'one little message'
        h = hashes.Hash(hashes.SHA256(), backend)
        h.update(message)
        digest = h.finalize()
        prehashed_alg = asym_utils.Prehashed(hashes.SHA256())
        pkcs = padding.PKCS1v15()
        signature = private_key.sign(message, pkcs, hashes.SHA256())
        public_key = private_key.public_key()
        public_key.verify(signature, digest, pkcs, prehashed_alg)

    def test_prehashed_digest_mismatch(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
        # Supplying a SHA256-sized digest with a Prehashed(SHA512) wrapper
        # must fail the length check before any crypto is attempted.
        public_key = rsa_key_2048.public_key()
        message = b'one little message'
        h = hashes.Hash(hashes.SHA256(), backend)
        h.update(message)
        data = h.finalize()
        prehashed_alg = asym_utils.Prehashed(hashes.SHA512())
        pkcs = padding.PKCS1v15()
        with pytest.raises(ValueError):
            public_key.verify(b'\x00' * 64, data, pkcs, prehashed_alg)
def export(preprocessor: Union[('PreTrainedTokenizer', 'FeatureExtractionMixin')], model: Union[('PreTrainedModel', 'TFPreTrainedModel')], config: OnnxConfig, opset: int, output: Path, tokenizer: 'PreTrainedTokenizer'=None) -> Tuple[(List[str], List[str])]:
    """Export a PyTorch or TensorFlow model to ONNX.

    Args:
        preprocessor: Tokenizer or feature extractor used to build dummy inputs.
        model: The model to export (PyTorch `PreTrainedModel` or TF `TFPreTrainedModel`).
        config: ONNX export configuration describing inputs/outputs.
        opset: ONNX opset version to target.
        output: Path the exported model is written to.
        tokenizer: Deprecated; use `preprocessor` instead.

    Returns:
        Tuple of (matched input names, matched output names) from the
        framework-specific export routine.

    Raises:
        ImportError: If neither PyTorch nor TensorFlow is installed.
        ValueError: If both a tokenizer and a preprocessor are given, or the
            model cannot be exported with the available framework.
    """
    if not (is_torch_available() or is_tf_available()):
        # Fixed double-negative in the original message
        # ("neither ... are not installed").
        raise ImportError('Cannot convert because neither PyTorch nor TensorFlow is installed. Please install torch or tensorflow first.')
    if isinstance(preprocessor, PreTrainedTokenizerBase) and (tokenizer is not None):
        raise ValueError('You cannot provide both a tokenizer and a preprocessor to export the model.')
    if tokenizer is not None:
        warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)
        logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')
        preprocessor = tokenizer
    if is_torch_available():
        from ..utils import torch_version
        if not is_torch_onnx_dict_inputs_support_available():
            raise AssertionError(f'Unsupported PyTorch version, minimum required is 1.8.0, got: {torch_version}')
        if not config.is_torch_support_available:
            logger.warning(f'Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version}, got: {torch_version}')
    if is_torch_available() and issubclass(type(model), PreTrainedModel):
        return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer)
    elif is_tf_available() and issubclass(type(model), TFPreTrainedModel):
        return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)
    # Previously this fell through and returned None implicitly; fail loudly
    # so callers are not handed a silent no-op.
    raise ValueError(f'Cannot export model of type {type(model).__name__}: no matching installed framework.')
def get_config():
    """Build the runtime configuration from the command line.

    Prints any warnings/errors produced by validation, exits with status 2 on
    errors, prompts for a password if one was not supplied, and fills in
    defaults for the IMAP port (993 with SSL, 143 without) and timeout.

    Returns:
        dict: The validated, fully-populated configuration.
    """
    config, warnings, errors = process_cline()
    config, warnings, errors = check_config(config, warnings, errors)
    for warning in warnings:
        print('WARNING:', warning)
    for error in errors:
        # Fixed: use 'ERROR:' for consistency with the 'WARNING:' prefix.
        print('ERROR:', error)
    if errors:
        sys.exit(2)
    if 'pass' not in config:
        # Prompt interactively rather than requiring the password on the
        # command line.
        config['pass'] = getpass.getpass()
    if 'port' not in config:
        # Standard IMAP ports: 993 for IMAPS, 143 for plain IMAP.
        config['port'] = 993 if config['usessl'] else 143
    if 'timeout' not in config:
        config['timeout'] = 60
    return config
def _run_reader_iter(reader: Any, buf: bytes, do_eof: bool) -> Generator[(Any, None, None)]:
while True:
event = reader(buf)
if (event is None):
break
(yield event)
if (type(event) is EndOfMessage):
break
if do_eof:
assert (not buf)
(yield reader.read_eof()) |
def get_control_names(control, allcontrols, textcontrols):
    """Collect candidate lookup names for *control*.

    Builds a set of strings a caller could use to find this control: its
    friendly class name, its window text (alone and combined with the class
    name), per-item texts, and names derived from nearby text controls.

    NOTE(review): indentation was lost in extraction; the branch structure
    below is the most plausible reconstruction — verify against upstream.
    """
    names = []
    friendly_class_name = control.friendly_class_name()
    # The class name itself is always a valid candidate.
    names.append(friendly_class_name)
    cleaned = control.window_text()
    if (cleaned and control.has_title):
        # Control has visible text: offer the text alone and text+class.
        names.append(cleaned)
        names.append((cleaned + friendly_class_name))
    elif (control.has_title and (friendly_class_name != 'TreeView')):
        # Titled but empty text: derive names from the item texts instead.
        # TreeViews are excluded — presumably their item texts are too
        # volatile to be useful names (TODO confirm).
        try:
            for text in control.texts()[1:]:
                names.append((friendly_class_name + text))
        except Exception:
            # texts() can fail on some controls; best-effort only.
            pass
        non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
        if non_text_names:
            names.extend(non_text_names)
    else:
        # No usable text at all: fall back to names inferred from
        # neighbouring text controls.
        non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
        if non_text_names:
            names.extend(non_text_names)
    # Drop empty/None entries and de-duplicate.
    cleaned_names = (set(names) - set([None, '']))
    return cleaned_names
class MongoDBCollector(diamond.collector.Collector):
    """Diamond collector for MongoDB server, database and collection metrics.

    Connects to one or more mongod instances, publishes serverStatus,
    dbStats and collstats values, and optionally replica-set health.
    (Python 2 era code: ``basestring``/``long``/``reduce`` preserved.)
    """

    # Maximum unsigned 32-bit CRC value; used to map a crc32 of the
    # collection name onto the [0, 1] sampling range.
    # NOTE(review): the original line was truncated to "MAX_CRC32 ="; the
    # value is restored to 2**32 - 1, consistent with the
    # "zlib.crc32(...) & 0xffffffff" masking below.
    MAX_CRC32 = 4294967295

    def __init__(self, *args, **kwargs):
        # Running totals used by compute_interval() to turn cumulative
        # counters into per-interval deltas.
        self.__totals = {}
        super(MongoDBCollector, self).__init__(*args, **kwargs)

    def get_default_config_help(self):
        """Return help text for each supported configuration key."""
        config_help = super(MongoDBCollector, self).get_default_config_help()
        config_help.update({'hosts': 'Array of hostname(:port) elements to get metrics fromSet an alias by prefixing host:port with alias@', 'host': 'A single hostname(:port) to get metrics from (can be used instead of hosts and overrides it)', 'user': 'Username for authenticated login (optional)', 'passwd': 'Password for authenticated login (optional)', 'databases': 'A regex of which databases to gather metrics for. Defaults to all databases.', 'ignore_collections': 'A regex of which collections to ignore. MapReduce temporary collections (tmp.mr.*) are ignored by default.', 'collection_sample_rate': 'Only send stats for a consistent subset of collections. This is applied after collections are ignored via ignore_collections Sampling uses crc32 so it is consistent across replicas. Value between 0 and 1. Default is 1', 'network_timeout': 'Timeout for mongodb connection (in milliseconds). There is no timeout by default.', 'simple': 'Only collect the same metrics as mongostat.', 'translate_collections': 'Translate dot (.) to underscores (_) in collection names.', 'replace_dashes_in_metric_keys': 'Replace dashes (-) to dots (.) in database object names and metrics', 'ssl': 'True to enable SSL connections to the MongoDB server. Default is False', 'replica': 'True to enable replica set logging. Reports health of individual nodes as well as basic aggregate stats. Default is False', 'replset_node_name': 'Identifier for reporting replset metrics. Default is _id'})
        return config_help

    def get_default_config(self):
        """Return the default configuration values for this collector."""
        config = super(MongoDBCollector, self).get_default_config()
        config.update({'path': 'mongo', 'hosts': ['localhost'], 'user': None, 'passwd': None, 'databases': '.*', 'ignore_collections': '^tmp\\.mr\\.', 'network_timeout': None, 'simple': 'False', 'translate_collections': 'False', 'replace_dashes_in_metric_keys': 'True', 'collection_sample_rate': 1, 'ssl': False, 'replica': False, 'replset_node_name': '_id'})
        return config

    def collect(self):
        """Connect to each configured host and publish its metrics."""
        if pymongo is None:
            self.log.error('Unable to import pymongo')
            return
        hosts = self.config.get('hosts')
        # A bare string is treated as a single-element host list.
        if isinstance(hosts, basestring):
            hosts = [hosts]
        if 'host' in self.config:
            hosts = [self.config['host']]
        if self.config['network_timeout']:
            self.config['network_timeout'] = int(self.config['network_timeout'])
        if self.config['collection_sample_rate']:
            self.config['collection_sample_rate'] = float(self.config['collection_sample_rate'])
        user = self.config['user'] if 'user' in self.config else None
        passwd = self.config['passwd'] if 'passwd' in self.config else None
        for host in hosts:
            # Hosts may be written as "alias@host:port"; group 2 is the
            # optional alias, group 3 the host itself.
            # NOTE(review): the original pattern '((.+)\)?(.+)?' had
            # unbalanced parentheses (would raise re.error); restored to the
            # alias@host form the groups below consume.
            matches = re.search('((.+)@)?(.+)?', host)
            alias = matches.group(2)
            host = matches.group(3)
            if alias is None:
                # No alias: prefix with a sanitized hostname, but only when
                # multiple hosts could collide.
                if len(hosts) == 1:
                    base_prefix = []
                else:
                    base_prefix = [re.sub('[:\\.]', '_', host)]
            else:
                base_prefix = [alias]
            try:
                if type(self.config['ssl']) is str:
                    self.config['ssl'] = str_to_bool(self.config['ssl'])
                if ReadPreference is None:
                    conn = pymongo.MongoClient(host, socketTimeoutMS=self.config['network_timeout'], ssl=self.config['ssl'])
                else:
                    conn = pymongo.MongoClient(host, socketTimeoutMS=self.config['network_timeout'], ssl=self.config['ssl'], read_preference=ReadPreference.SECONDARY)
            except Exception as e:
                self.log.error('Could not connect to mongodb: %s', e)
                continue
            if user:
                try:
                    conn.admin.authenticate(user, passwd)
                except Exception as e:
                    self.log.error(('User auth given, but could not authenticate' + (' with host: %s, err: %s' % (host, e))))
                    return {}
            data = conn.db.command('serverStatus')
            self._publish_transformed(data, base_prefix)
            if str_to_bool(self.config['simple']):
                data = self._extract_simple_data(data)
            if str_to_bool(self.config['replica']):
                try:
                    replset_data = conn.admin.command('replSetGetStatus')
                    self._publish_replset(replset_data, base_prefix)
                except pymongo.errors.OperationFailure as e:
                    # Fixed: the original passed `e` with no %s placeholder,
                    # which makes the logging call itself error out.
                    self.log.error('error getting replica set status: %s', e)
            self._publish_dict_with_prefix(data, base_prefix)
            db_name_filter = re.compile(self.config['databases'])
            ignored_collections = re.compile(self.config['ignore_collections'])
            sample_threshold = (self.MAX_CRC32 * self.config['collection_sample_rate'])
            for db_name in conn.database_names():
                if not db_name_filter.search(db_name):
                    continue
                db_stats = conn[db_name].command('dbStats')
                db_prefix = (base_prefix + ['databases', db_name])
                self._publish_dict_with_prefix(db_stats, db_prefix)
                for collection_name in conn[db_name].collection_names():
                    if ignored_collections.search(collection_name):
                        continue
                    # Mask crc32 to an unsigned 32-bit value so sampling is
                    # consistent across Python versions/platforms.
                    # NOTE(review): the original mask operand was truncated
                    # ("& )"); restored to 0xffffffff, matching MAX_CRC32.
                    if ((self.config['collection_sample_rate'] < 1) and ((zlib.crc32(collection_name) & 0xffffffff) > sample_threshold)):
                        continue
                    collection_stats = conn[db_name].command('collstats', collection_name)
                    if str_to_bool(self.config['translate_collections']):
                        collection_name = collection_name.replace('.', '_')
                    collection_prefix = (db_prefix + [collection_name])
                    self._publish_dict_with_prefix(collection_stats, collection_prefix)

    def _publish_replset(self, data, base_prefix):
        """Publish replica-set status: aggregate health plus per-node stats."""
        prefix = (base_prefix + ['replset'])
        self._publish_dict_with_prefix(data, prefix)
        total_nodes = len(data['members'])
        healthy_nodes = reduce((lambda value, node: (value + node['health'])), data['members'], 0)
        self._publish_dict_with_prefix({'healthy_nodes': healthy_nodes, 'total_nodes': total_nodes}, prefix)
        for node in data['members']:
            replset_node_name = node[self.config['replset_node_name']]
            # Use only the short hostname as the node metric key.
            node_name = str(replset_node_name.split('.')[0])
            self._publish_dict_with_prefix(node, (prefix + ['node', node_name]))

    def _publish_transformed(self, data, base_prefix):
        """Publish rate and percentage metrics derived from serverStatus."""
        self._publish_dict_with_prefix(data.get('opcounters', {}), (base_prefix + ['opcounters_per_sec']), self.publish_counter)
        self._publish_dict_with_prefix(data.get('opcountersRepl', {}), (base_prefix + ['opcountersRepl_per_sec']), self.publish_counter)
        self._publish_metrics((base_prefix + ['backgroundFlushing_per_sec']), 'flushes', data.get('backgroundFlushing', {}), self.publish_counter)
        self._publish_dict_with_prefix(data.get('network', {}), (base_prefix + ['network_per_sec']), self.publish_counter)
        self._publish_metrics((base_prefix + ['extra_info_per_sec']), 'page_faults', data.get('extra_info', {}), self.publish_counter)

        def get_dotted_value(data, key_name):
            # Walk a nested dict by a dotted key path; 0 if any step missing.
            key_name = key_name.split('.')
            for i in key_name:
                data = data.get(i, {})
                if not data:
                    return 0
            return data

        def compute_interval(data, total_name):
            # Delta of a cumulative counter since the previous collect() run.
            current_total = get_dotted_value(data, total_name)
            total_key = '.'.join((base_prefix + [total_name]))
            last_total = self.__totals.get(total_key, current_total)
            interval = (current_total - last_total)
            self.__totals[total_key] = current_total
            return interval

        def publish_percent(value_name, total_name, data):
            value = float((get_dotted_value(data, value_name) * 100))
            interval = compute_interval(data, total_name)
            key = '.'.join((base_prefix + ['percent', value_name]))
            self.publish_counter(key, value, time_delta=bool(interval), interval=interval)

        publish_percent('globalLock.lockTime', 'globalLock.totalTime', data)
        publish_percent('indexCounters.btree.misses', 'indexCounters.btree.accesses', data)
        locks = data.get('locks')
        if locks:
            # The '.' key holds the global lock; rename so the dotted-path
            # helpers don't misparse it.
            if '.' in locks:
                locks['_global_'] = locks['.']
                del locks['.']
            key_prefix = '.'.join((base_prefix + ['percent']))
            db_name_filter = re.compile(self.config['databases'])
            interval = compute_interval(data, 'uptimeMillis')
            for db_name in locks:
                if not db_name_filter.search(db_name):
                    continue
                r = get_dotted_value(locks, ('%s.timeLockedMicros.r' % db_name))
                # Fixed: the original had a stray leading '.' in this path
                # ('.%s...'), which always resolved to 0 and silently dropped
                # the intent-shared read-lock time.
                R = get_dotted_value(locks, ('%s.timeLockedMicros.R' % db_name))
                value = (float((r + R)) / 10)
                if value:
                    self.publish_counter((key_prefix + ('.locks.%s.read' % db_name)), value, time_delta=bool(interval), interval=interval)
                w = get_dotted_value(locks, ('%s.timeLockedMicros.w' % db_name))
                W = get_dotted_value(locks, ('%s.timeLockedMicros.W' % db_name))
                value = (float((w + W)) / 10)
                if value:
                    self.publish_counter((key_prefix + ('.locks.%s.write' % db_name)), value, time_delta=bool(interval), interval=interval)

    def _publish_dict_with_prefix(self, dict, prefix, publishfn=None):
        # Parameter name 'dict' shadows the builtin but is kept for
        # interface compatibility with existing callers.
        for key in dict:
            self._publish_metrics(prefix, key, dict, publishfn)

    def _publish_metrics(self, prev_keys, key, data, publishfn=None):
        """Recursively publish data[key], joining nested keys with dots."""
        if key not in data:
            return
        value = data[key]
        keys = (prev_keys + [key])
        keys = [x.replace(' ', '_') for x in keys]
        if str_to_bool(self.config['replace_dashes_in_metric_keys']):
            keys = [x.replace('-', '.') for x in keys]
        if not publishfn:
            publishfn = self.publish
        if isinstance(value, dict):
            for new_key in value:
                self._publish_metrics(keys, new_key, value)
        elif (isinstance(value, int) or isinstance(value, float)):
            publishfn('.'.join(keys), value)
        elif isinstance(value, long):
            publishfn('.'.join(keys), float(value))
        elif isinstance(value, datetime.datetime):
            # Publish datetimes as epoch seconds.
            publishfn('.'.join(keys), long(value.strftime('%s')))

    def _extract_simple_data(self, data):
        """Reduce serverStatus output to the mongostat-equivalent subset."""
        return {'connections': data.get('connections'), 'globalLock': data.get('globalLock'), 'indexCounters': data.get('indexCounters')}
def main(args):
device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
n_gpu = torch.cuda.device_count()
logger.info('device: {}, n_gpu: {}, 16-bits training: {}'.format(device, n_gpu, args.fp16))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if (n_gpu > 0):
torch.cuda.manual_seed_all(args.seed)
if (args.gradient_accumulation_steps < 1):
raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
if ((not args.do_train) and (not args.do_eval)):
raise ValueError('At least one of `do_train` or `do_eval` must be True.')
if args.do_train:
assert ((args.train_file is not None) and (args.dev_file is not None))
if args.eval_test:
assert (args.test_file is not None)
else:
assert (args.dev_file is not None)
if (not os.path.exists(args.output_dir)):
os.makedirs(args.output_dir)
if args.do_train:
logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, 'train.log'), 'w'))
else:
logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, 'eval.log'), 'w'))
logger.info(args)
tokenizer = BertTokenizer.from_pretrained(args.model, do_lower_case=args.do_lower_case)
query_templates = read_query_templates(normal_file=args.normal_file, des_file=args.des_file)
if (args.do_train or (not args.eval_test)):
eval_examples = read_ace_examples(input_file=args.dev_file, is_training=False)
gold_examples = read_ace_examples(input_file=args.gold_file, is_training=False)
eval_features = convert_examples_to_features(examples=eval_examples, tokenizer=tokenizer, query_templates=query_templates, nth_query=args.nth_query, is_training=False)
logger.info('***** Dev *****')
logger.info(' Num orig examples = %d', len(eval_examples))
logger.info(' Num split examples = %d', len(eval_features))
logger.info(' Batch size = %d', args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_if_trigger_ids = torch.tensor([f.if_trigger_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_if_trigger_ids, all_example_index)
eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)
if args.do_train:
train_examples = read_ace_examples(input_file=args.train_file, is_training=True)
train_features = convert_examples_to_features(examples=train_examples, tokenizer=tokenizer, query_templates=query_templates, nth_query=args.nth_query, is_training=True)
if ((args.train_mode == 'sorted') or (args.train_mode == 'random_sorted')):
train_features = sorted(train_features, key=(lambda f: np.sum(f.input_mask)))
else:
random.shuffle(train_features)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_if_trigger_ids = torch.tensor([f.if_trigger_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_if_trigger_ids, all_start_positions, all_end_positions)
train_dataloader = DataLoader(train_data, batch_size=args.train_batch_size)
train_batches = [batch for batch in train_dataloader]
num_train_optimization_steps = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
logger.info('***** Train *****')
logger.info(' Num orig examples = %d', len(train_examples))
logger.info(' Num split examples = %d', len(train_features))
# ---- Training phase: sweep candidate learning rates, train, periodically ----
# ---- evaluate on dev, and checkpoint the best model seen so far.        ----
logger.info(' Batch size = %d', args.train_batch_size)
logger.info(' Num steps = %d', num_train_optimization_steps)
# Run the in-training evaluation `eval_per_epoch` times per epoch (at least once).
eval_step = max(1, (len(train_batches) // args.eval_per_epoch))
best_result = None
# With no explicit learning rate, sweep a default grid of candidate LRs.
lrs = ([args.learning_rate] if args.learning_rate else [1e-06, 2e-06, 3e-06, 5e-06, 1e-05, 2e-05, 3e-05, 5e-05])
for lr in lrs:
    # Fresh pretrained model per learning rate so the sweep runs are independent.
    if (not args.add_if_trigger_embedding):
        model = BertForQuestionAnswering.from_pretrained(args.model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE)
    else:
        model = BertForQuestionAnswering_withIfTriggerEmbedding.from_pretrained(args.model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE)
    if args.fp16:
        model.half()
    model.to(device)
    if (n_gpu > 1):
        model = torch.nn.DataParallel(model)
    # Optimizer setup: drop the (unused by the QA head) pooler parameters and
    # apply no weight decay to biases / LayerNorm weights.
    param_optimizer = list(model.named_parameters())
    param_optimizer = [n for n in param_optimizer if ('pooler' not in n[0])]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = BertAdam(optimizer_grouped_parameters, lr=lr, warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
    tr_loss = 0
    nb_tr_examples = 0
    nb_tr_steps = 0
    global_step = 0
    start_time = time.time()
    for epoch in range(int(args.num_train_epochs)):
        model.train()
        logger.info('Start epoch #{} (lr = {})...'.format(epoch, lr))
        if ((args.train_mode == 'random') or (args.train_mode == 'random_sorted')):
            random.shuffle(train_batches)
        for (step, batch) in enumerate(train_batches):
            if (n_gpu == 1):
                # DataParallel scatters tensors itself; move manually only on a single GPU.
                batch = tuple((t.to(device) for t in batch))
            (input_ids, input_mask, segment_ids, if_trigger_ids, start_positions, end_positions) = batch
            if (not args.add_if_trigger_embedding):
                loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
            else:
                loss = model(input_ids, segment_ids, if_trigger_ids, input_mask, start_positions, end_positions)
            if (n_gpu > 1):
                # DataParallel returns one loss per replica; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1
            loss.backward()
            # Step the optimizer only every `gradient_accumulation_steps` batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
            # Periodic dev-set evaluation / checkpointing (also fires once at step 0).
            if ((((step + 1) % eval_step) == 0) or (step == 0)):
                save_model = False
                if args.do_eval:
                    (result, preds) = evaluate(args, model, device, eval_dataloader, eval_examples, gold_examples, eval_features)
                    # evaluate() leaves the model in eval mode; switch back to training.
                    model.train()
                    result['global_step'] = global_step
                    result['epoch'] = epoch
                    result['learning_rate'] = lr
                    result['batch_size'] = args.train_batch_size
                    # Checkpoint only when the configured dev metric improves.
                    if ((best_result is None) or (result[args.eval_metric] > best_result[args.eval_metric])):
                        best_result = result
                        save_model = True
                        logger.info('Epoch: {}, Step: {} / {}, used_time = {:.2f}s, loss = {:.6f}'.format(epoch, (step + 1), len(train_batches), (time.time() - start_time), (tr_loss / nb_tr_steps)))
                        logger.info(('!!! Best dev %s (lr=%s, epoch=%d): p_c: %.2f, r_c: %.2f, f1_c: %.2f, p_i: %.2f, r_i: %.2f, f1_i: %.2f, best_na_thresh: %.5f' % (args.eval_metric, str(lr), epoch, result['prec_c'], result['recall_c'], result['f1_c'], result['prec_i'], result['recall_i'], result['f1_i'], result['best_na_thresh'])))
                else:
                    # Without dev eval: checkpoint only late in training (one of the
                    # last 3 epochs, past 70% of an epoch) or at the very first step.
                    # NOTE(review): the initial `save_model = True` is immediately
                    # overwritten by the if/else below and appears redundant.
                    save_model = True
                    if ((((int(args.num_train_epochs) - epoch) < 3) and (((step + 1) / len(train_batches)) > 0.7)) or (step == 0)):
                        save_model = True
                    else:
                        save_model = False
                if save_model:
                    # Unwrap DataParallel before saving, then write weights/config/vocab
                    # into a per-(epoch, step) checkpoint directory.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    subdir = os.path.join(args.output_dir, 'epoch{epoch}-step{step}'.format(epoch=epoch, step=step))
                    if (not os.path.exists(subdir)):
                        os.makedirs(subdir)
                    output_model_file = os.path.join(subdir, WEIGHTS_NAME)
                    output_config_file = os.path.join(subdir, CONFIG_NAME)
                    torch.save(model_to_save.state_dict(), output_model_file)
                    model_to_save.config.to_json_file(output_config_file)
                    tokenizer.save_vocabulary(subdir)
                    if best_result:
                        # Persist the best dev metrics alongside the checkpoints.
                        with open(os.path.join(args.output_dir, 'eval_results.txt'), 'w') as writer:
                            for key in sorted(best_result.keys()):
                                writer.write(('%s = %s\n' % (key, str(best_result[key]))))
# ---- Final evaluation: load the saved model and dump test predictions. ----
if args.do_eval:
    if args.eval_test:
        # Rebuild examples/features and the dataloader for the test split.
        eval_examples = read_ace_examples(input_file=args.test_file, is_training=False)
        gold_examples = read_ace_examples(input_file=args.gold_file, is_training=False)
        eval_features = convert_examples_to_features(examples=eval_examples, tokenizer=tokenizer, query_templates=query_templates, nth_query=args.nth_query, is_training=False)
        logger.info('***** Test *****')
        logger.info(' Num orig examples = %d', len(eval_examples))
        logger.info(' Num split examples = %d', len(eval_features))
        logger.info(' Batch size = %d', args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_if_trigger_ids = torch.tensor([f.if_trigger_ids for f in eval_features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_if_trigger_ids, all_example_index)
        eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)
    # Load the trained checkpoint from model_dir for final scoring.
    if (not args.add_if_trigger_embedding):
        model = BertForQuestionAnswering.from_pretrained(args.model_dir)
    else:
        model = BertForQuestionAnswering_withIfTriggerEmbedding.from_pretrained(args.model_dir)
    if args.fp16:
        model.half()
    model.to(device)
    (result, preds) = evaluate(args, model, device, eval_dataloader, eval_examples, gold_examples, eval_features, pred_only=True)
    with open(os.path.join(args.model_dir, 'test_results.txt'), 'w') as writer:
        for key in result:
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    with open(os.path.join(args.model_dir, 'arg_predictions.json'), 'w') as writer:
        for key in preds:
            # One JSON object per line; default=int handles numpy integer types.
            writer.write((json.dumps(preds[key], default=int) + '\n'))
def test_teardown_logging(pytester: Pytester) -> None:
    """A failing teardown's log records are reported in their own captured-log section."""
    pytester.makepyfile("\n import logging\n\n logger = logging.getLogger(__name__)\n\n def test_foo():\n logger.info('text going to logger from call')\n\n def teardown_function(function):\n logger.info('text going to logger from teardown')\n assert False\n ")
    run_result = pytester.runpytest('--log-level=INFO')
    # The teardown assertion makes the run fail.
    assert run_result.ret == 1
    expected_lines = [
        '*- Captured *log call -*',
        '*text going to logger from call*',
        '*- Captured *log teardown -*',
        '*text going to logger from teardown*',
    ]
    run_result.stdout.fnmatch_lines(expected_lines)
def get_non_trading_days(start, end):
    """Return a sorted pd.DatetimeIndex of non-trading days in [start, end].

    Combines weekends, recurring US market holidays (with weekend-observance
    shifts), and a handful of one-off historical closures.
    """
    start = canonicalize_datetime(start)
    end = canonicalize_datetime(end)

    def bounded_rule(freq, dtstart=None, **byrules):
        # A cached recurrence clipped to the requested window; dtstart may be
        # overridden for holidays that only exist from a given year onward.
        return rrule.rrule(freq, cache=True, dtstart=(dtstart or start), until=end, **byrules)

    monthly = rrule.MONTHLY
    rules = [
        bounded_rule(rrule.YEARLY, byweekday=(rrule.SA, rrule.SU)),           # weekends
        bounded_rule(monthly, byyearday=1),                                    # New Year's Day
        bounded_rule(monthly, byyearday=2, byweekday=rrule.MO),                # New Year's observed on Monday
        bounded_rule(monthly, dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
                     bymonth=1, byweekday=rrule.MO(3)),                        # MLK Day (holiday from 1998)
        bounded_rule(monthly, bymonth=2, byweekday=rrule.MO(3)),               # Presidents' Day
        bounded_rule(rrule.DAILY, byeaster=(-2)),                              # Good Friday
        bounded_rule(monthly, bymonth=5, byweekday=rrule.MO(-1)),              # Memorial Day
        bounded_rule(monthly, bymonth=7, bymonthday=4),                        # Independence Day
        bounded_rule(monthly, bymonth=7, bymonthday=5, byweekday=rrule.MO),    # July 4th observed Monday
        bounded_rule(monthly, bymonth=7, bymonthday=3, byweekday=rrule.FR),    # July 4th observed Friday
        bounded_rule(monthly, bymonth=9, byweekday=rrule.MO(1)),               # Labor Day
        bounded_rule(monthly, bymonth=11, byweekday=rrule.TH(4)),              # Thanksgiving
        bounded_rule(monthly, bymonth=12, bymonthday=25),                      # Christmas
        bounded_rule(monthly, bymonth=12, bymonthday=26, byweekday=rrule.MO),  # Christmas observed Monday
        bounded_rule(monthly, bymonth=12, bymonthday=24, byweekday=rrule.FR),  # Christmas observed Friday
    ]

    ruleset = rrule.rruleset()
    for rule in rules:
        ruleset.rrule(rule)
    days = ruleset.between(start, end, inc=True)

    # One-off closures: September 2001 (presumably the 9/11 attacks) and
    # late October 2012 (presumably Hurricane Sandy).
    days.extend(datetime(2001, 9, day_num, tzinfo=pytz.utc) for day_num in range(11, 17))
    days.extend(datetime(2012, 10, day_num, tzinfo=pytz.utc) for day_num in range(29, 31))
    # Additional ad-hoc single-day closures.
    days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
    days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
    days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
    days.sort()
    return pd.DatetimeIndex(days)
class variable_size_graph():
    """Random block-model graph with one appended fixed block.

    Builds an unbalanced stochastic block model, grafts the pattern block W0
    (with signal u0) onto it, shuffles the vertices, and exposes the adjacency
    matrix, sparse edge-to-endpoint incidence matrices, the per-vertex signal,
    and a binary target marking the appended block's vertices.
    """

    def __init__(self, task_parameters):
        # Unpack the task configuration.
        vocab = task_parameters['Voc']
        n_clusters = task_parameters['nb_clusters_target']
        size_min = task_parameters['size_min']
        size_max = task_parameters['size_max']
        p_intra = task_parameters['p']
        q_inter = task_parameters['q']
        W0 = task_parameters['W0']
        u0 = task_parameters['u0']
        add_self_loops = True

        # Random unbalanced block model, then graft the fixed block W0 onto it.
        W, c = block.unbalanced_block_model(n_clusters, size_min, size_max, p_intra, q_inter)
        u = np.random.randint(vocab, size=W.shape[0])
        W, c = block.add_a_block(W0, W, c, n_clusters, q_inter)
        u = np.concatenate((u, u0), axis=0)

        # Shuffle the vertices and permute the signal with the same index map.
        W, c, idx = block.schuffle(W, c)
        u = torch.from_numpy(u[idx]).long()

        if add_self_loops:
            for v in range(W.shape[0]):
                W[v, v] = 1

        # Target is 1 where the community label equals n_clusters — presumably
        # the label assigned to the appended W0 block.
        target = torch.from_numpy((c == n_clusters).astype(float)).long()

        # Sparse incidence matrices mapping each edge to its start/end vertex.
        W_coo = sp.coo_matrix(W)
        n_edges = W_coo.nnz
        n_vertices = W.shape[0]
        edge_ids = np.arange(n_edges)
        edge_to_starting_vertex = sp.coo_matrix((np.ones(n_edges), (edge_ids, W_coo.row)), shape=(n_edges, n_vertices))
        edge_to_ending_vertex = sp.coo_matrix((np.ones(n_edges), (edge_ids, W_coo.col)), shape=(n_edges, n_vertices))

        self.adj_matrix = W
        self.edge_to_starting_vertex = edge_to_starting_vertex
        self.edge_to_ending_vertex = edge_to_ending_vertex
        self.signal = u
        self.target = target
class BufferOperation(enum.IntFlag):
    """Flags selecting which VISA I/O buffer to flush or discard.

    Members wrap the library's VI_*_BUF* constants; being an IntFlag, they can
    be OR-ed together in a single call.

    NOTE(review): the exact flush-vs-discard semantics of each constant are
    inferred from the member/constant names — confirm against the VISA spec.
    """
    # Formatted-I/O read buffer.
    discard_read_buffer = VI_READ_BUF
    discard_read_buffer_no_io = VI_READ_BUF_DISCARD
    # Formatted-I/O write buffer.
    flush_write_buffer = VI_WRITE_BUF
    discard_write_buffer = VI_WRITE_BUF_DISCARD
    # Low-level receive (input) buffer; two aliases exist for the two constants.
    discard_receive_buffer = VI_IO_IN_BUF_DISCARD
    discard_receive_buffer2 = VI_IO_IN_BUF
    # Low-level transmit (output) buffer.
    flush_transmit_buffer = VI_IO_OUT_BUF
    discard_transmit_buffer = VI_IO_OUT_BUF_DISCARD
class TestWebsiteCollector(CollectorTestCase):
    """Tests for WebsiteMonitorCollector with urllib2.urlopen patched out.

    NOTE(review): the original source contained bare `(Collector, 'publish')`
    expressions before the test methods taking `publish_mock` — clearly
    `@patch.object(Collector, 'publish')` decorators whose '@' prefix was lost
    in extraction; they are restored here so the mock argument is injected.
    """

    def setUp(self, config=None):
        """Build a collector from `config` (or a minimal default) and patch urlopen."""
        if config is None:
            config = get_collector_config('WebsiteCollector', {'url': ''})
        else:
            config = get_collector_config('WebsiteCollector', config)
        self.collector = WebsiteMonitorCollector(config, None)
        self.patcher = patch('urllib2.urlopen')
        self.urlopen_mock = self.patcher.start()

    def test_import(self):
        self.assertTrue(WebsiteMonitorCollector)

    @patch.object(Collector, 'publish')
    def test_websitemonitorcollector_with_data(self, publish_mock):
        # BUG FIX: the mocked HTTP response must be configured *before*
        # collect() runs — previously the return_value was set afterwards,
        # so the collector only ever saw the default MagicMock.
        self.urlopen_mock.return_value = MockResponse(200)
        self.collector.collect()
        metrics = {}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany([publish_mock], metrics)

    @patch.object(Collector, 'publish')
    def test_websitemonitorcollector(self, publish_mock):
        self.setUp()
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})

    def tearDown(self):
        # Stop the urlopen patcher started in setUp().
        self.patcher.stop()
def test_point_distance():
    """utils.point_distance: input validation plus the distance value itself."""
    # Plain Python lists are rejected — presumably the helper asserts on
    # ndarray inputs; confirm against utils.point_distance.
    with pytest.raises(AssertionError):
        utils.point_distance([1, 2], [1, 2])
    # A 3-component vector is rejected as well: points must have 2 components.
    with pytest.raises(AssertionError):
        too_long = np.array([1, 2, 3])
        utils.point_distance(too_long, too_long)
    origin = np.array([1, 2])
    assert utils.point_distance(origin, origin) == 0
    neighbour = np.array([2, 2])
    assert utils.point_distance(origin, neighbour) == 1
class SwaggerDeprecatedTest(object):
    """Regression tests for deprecated flask-restx swagger/apidoc behaviours.

    NOTE(review): several bare expressions in the bodies below — e.g.
    ('/with-parser/'), (parser=parser), (get={'parser': parser}), ('Docs') —
    appear to be decorators (@api.route(...), @api.doc(...)) whose '@' prefix
    was lost during extraction; confirm against the original test suite.
    """

    def test_doc_parser_parameters(self, api):
        # Documenting via `parser=` is deprecated: it should warn and be
        # translated into an 'expect' entry on the method's __apidoc__.
        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param')
        with pytest.warns(DeprecationWarning):
            ('/with-parser/')
            class WithParserResource(restx.Resource):
                (parser=parser)
                def get(self):
                    return {}
        assert ('parser' not in WithParserResource.get.__apidoc__)
        assert ('expect' in WithParserResource.get.__apidoc__)
        doc_parser = WithParserResource.get.__apidoc__['expect'][0]
        assert (doc_parser.__schema__ == parser.__schema__)

    def test_doc_method_parser_on_class(self, api):
        # Same deprecation, but with the parser documented per-method at the
        # class level; only the 'get' entry should gain the 'expect'.
        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param')
        with pytest.warns(DeprecationWarning):
            ('/with-parser/')
            (get={'parser': parser})
            class WithParserResource(restx.Resource):
                def get(self):
                    return {}
                def post(self):
                    return {}
        assert ('parser' not in WithParserResource.__apidoc__['get'])
        assert ('expect' in WithParserResource.__apidoc__['get'])
        doc_parser = WithParserResource.__apidoc__['get']['expect'][0]
        assert (doc_parser.__schema__ == parser.__schema__)

    def test_doc_body_as_tuple(self, api):
        # 'body' with a (model, description) tuple is deprecated in favour of 'expect'.
        fields = api.model('Person', {'name': restx.fields.String, 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        with pytest.warns(DeprecationWarning):
            ('/model-as-dict/')
            class ModelAsDict(restx.Resource):
                (body=(fields, 'Body description'))
                def post(self):
                    return {}
        assert ('body' not in ModelAsDict.post.__apidoc__)
        assert (ModelAsDict.post.__apidoc__['expect'] == [(fields, 'Body description')])

    def test_build_request_body_parameters_schema(self):
        # 'in: body' reqparse arguments are merged into one 'payload' body schema.
        parser = restx.reqparse.RequestParser()
        parser.add_argument('test', type=int, location='headers')
        parser.add_argument('test1', type=int, location='json')
        parser.add_argument('test2', location='json')
        body_params = [p for p in parser.__schema__ if (p['in'] == 'body')]
        result = restx.swagger.build_request_body_parameters_schema(body_params)
        assert (result['name'] == 'payload')
        assert result['required']
        assert (result['in'] == 'body')
        assert (result['schema']['type'] == 'object')
        assert (result['schema']['properties']['test1']['type'] == 'integer')
        assert (result['schema']['properties']['test2']['type'] == 'string')

    def test_expect_unused_model(self, app, api, client):
        # With RESTX_INCLUDE_ALL_MODELS enabled, even models no endpoint
        # references are emitted into the spec's definitions.
        from flask_restx import fields
        api.model('SomeModel', {'param': fields.String, 'count': fields.Integer})
        ('/with-parser/', endpoint='with-parser')
        class WithParserResource(restx.Resource):
            def get(self):
                return {}
        app.config['RESTX_INCLUDE_ALL_MODELS'] = True
        data = client.get_specs()
        assert ('/with-parser/' in data['paths'])
        path = data['paths']['/with-parser/']
        assert ('parameters' not in path)
        model = data['definitions']['SomeModel']
        assert (model == {'properties': {'count': {'type': 'integer'}, 'param': {'type': 'string'}}, 'type': 'object'})

    def test_not_expect_unused_model(self, app, api, client):
        # Default behaviour: unused models are omitted from the spec entirely.
        from flask_restx import fields
        api.model('SomeModel', {'param': fields.String, 'count': fields.Integer})
        ('/with-parser/', endpoint='with-parser')
        class WithParserResource(restx.Resource):
            def get(self):
                return {}
        data = client.get_specs()
        assert ('/with-parser/' in data['paths'])
        assert ('definitions' not in data)
        path = data['paths']['/with-parser/']
        assert ('parameters' not in path)

    def test_nondefault_swagger_filename(self, app, client):
        # The swagger JSON can be served under a custom filename and doc URL.
        api = restx.Api(doc='/doc/test', default_swagger_filename='test.json')
        ns = restx.Namespace('ns1')
        ('/test1')
        class Ns(restx.Resource):
            ('Docs')
            def get(self):
                pass
        api.add_namespace(ns)
        api.init_app(app)
        resp = client.get('/test.json')
        assert (resp.status_code == 200)
        assert (resp.content_type == 'application/json')
        resp = client.get('/doc/test')
        assert (resp.status_code == 200)
        assert (resp.content_type == 'text/html; charset=utf-8')
        resp = client.get('/ns1/test1')
        assert (resp.status_code == 200)
class SeasonalTiltMount(pvsystem.AbstractMount):
    """Mount whose surface tilt is selected per calendar month.

    NOTE(review): the bare attribute annotations suggest this class was
    declared as a @dataclass (as in pvlib's custom-mount example); confirm
    that the decorator was not lost in extraction.
    """
    # One tilt value per month, indexed January..December (12 entries assumed).
    monthly_tilts: list
    # Fixed surface azimuth in degrees; defaults to 180.
    surface_azimuth: float = 180.0

    def get_orientation(self, solar_zenith, solar_azimuth):
        """Return surface_tilt/surface_azimuth as a DataFrame on solar_zenith's index.

        `solar_azimuth` is unused; assumes `solar_zenith` has a DatetimeIndex
        (its `.month` attribute is read) — TODO confirm with callers.
        """
        # Month numbers are 1-based; the tilt list is 0-based.
        tilts = [self.monthly_tilts[(m - 1)] for m in solar_zenith.index.month]
        return pd.DataFrame({'surface_tilt': tilts, 'surface_azimuth': self.surface_azimuth}, index=solar_zenith.index)
class TestEnvVars(EnvironmentTestCase):
    """Tests for the runner's --env / --default-env environment handling."""

    def test_run_no_env(self, runner, target):
        # Ambient environment variables pass through unchanged.
        env = self.run_environ(runner, *target, environ={'USER': 'romain'})
        assert (env.get('USER') == 'romain')

    def test_run_env(self, runner, target):
        # --env overrides a variable inherited from the ambient environment.
        env = self.run_environ(runner, *target, '--env', 'USER=serious', environ={'USER': 'romain'})
        assert (env.get('USER') == 'serious')

    def test_run_env_mixed(self, runner, target):
        # --env adds new variables alongside inherited ones; the assertion on
        # TWO shows double quotes in the value are stripped ('TWO="2"' -> '2').
        env = self.run_environ(runner, *target, '--env', 'ONE=1', '--env', 'TWO="2"', environ={'USER': 'romain'})
        assert (env.get('USER') == 'romain')
        assert (env.get('ONE') == '1')
        assert (env.get('TWO') == '2')

    def test_run_default_env(self, runner, target):
        # --default-env applies only when the variable is absent from the
        # ambient environment; an explicit --env wins over both.
        env = self.run_environ(runner, *target, '--default-env', 'USER=clown')
        assert (env.get('USER') == 'clown')
        env = self.run_environ(runner, *target, '--default-env', 'USER=clown', environ={'USER': 'romain'})
        assert (env.get('USER') == 'romain')
        env = self.run_environ(runner, *target, '--env', 'USER=serious', '--default-env', 'USER=clown', environ={'USER': 'romain'})
        assert (env.get('USER') == 'serious')
class ChainBiMapper(SequenceBiMapper):
    """Chains two SequenceMappers; the second also sees the first's output.

    apply() returns both mappers' outputs as a pair, matching the
    SequenceBiMapper contract.
    """

    def __init__(self, first_layer: SequenceMapper, second_layer: SequenceMapper):
        self.first_layer = first_layer
        self.second_layer = second_layer

    def apply(self, is_train, x, mask=None):
        # First stage maps the raw input under its own variable scope.
        with tf.variable_scope('out'):
            first_out = self.first_layer.apply(is_train, x, mask)
        # Second stage consumes the input concatenated with the first output
        # along the feature axis.
        with tf.variable_scope('chained-out'):
            chained_input = tf.concat([x, first_out], axis=2)
            second_out = self.second_layer.apply(is_train, chained_input, mask)
        return (first_out, second_out)
def generate_packets() -> List[bytes]:
    """Build a mocked mDNS response advertising several HomeKit (_hap._tcp) services.

    One TXT answer plus SRV/TXT/A/NSEC additional records per service; returns
    the outgoing message serialized into wire-format packets.
    """
    # Authoritative response message.
    out = DNSOutgoing((const._FLAGS_QR_RESPONSE | const._FLAGS_AA))
    # All services share one IPv4 address.
    address = socket.inet_pton(socket.AF_INET, '192.168.208.5')
    # One record set (service name, A-record address, SRV port, TXT payload) per service.
    additionals = [
        {'name': 'HASS Bridge ZJWH FF5137._hap._tcp.local.', 'address': address, 'port': 51832, 'text': b'\x13md=HASS Bridge ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=L0m/aQ=='},
        {'name': 'HASS Bridge 3K9A C2582A._hap._tcp.local.', 'address': address, 'port': 51834, 'text': b'\x13md=HASS Bridge 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=b2CnzQ=='},
        {'name': 'Master Bed TV CEDB27._hap._tcp.local.', 'address': address, 'port': 51830, 'text': b'\x10md=Master Bed TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05ci=31\x04sf=0\x0bsh=CVj1kw=='},
        {'name': 'Living Room TV 921B77._hap._tcp.local.', 'address': address, 'port': 51833, 'text': b'\x11md=Living Room TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05ci=31\x04sf=0\x0bsh=qU77SQ=='},
        {'name': 'HASS Bridge ZC8X FF413D._hap._tcp.local.', 'address': address, 'port': 51829, 'text': b'\x13md=HASS Bridge ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=b0QZlg=='},
        {'name': 'HASS Bridge WLTF 4BE61F._hap._tcp.local.', 'address': address, 'port': 51837, 'text': b'\x13md=HASS Bridge WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ahAISA=='},
        {'name': 'FrontdoorCamera 8941D1._hap._tcp.local.', 'address': address, 'port': 54898, 'text': b'\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA=='},
        {'name': 'HASS Bridge W9DN 5B5CC5._hap._tcp.local.', 'address': address, 'port': 51836, 'text': b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A=='},
        {'name': 'HASS Bridge Y9OO EFF0A7._hap._tcp.local.', 'address': address, 'port': 51838, 'text': b'\x13md=HASS Bridge Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=u3bdfw=='},
        {'name': 'Snooze Room TV 6B89B0._hap._tcp.local.', 'address': address, 'port': 51835, 'text': b'\x11md=Snooze Room TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05ci=31\x04sf=0\x0bsh=xNTqsg=='},
        {'name': 'AlexanderHomeAssistant 74651D._hap._tcp.local.', 'address': address, 'port': 54811, 'text': b'\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA=='},
        {'name': 'HASS Bridge OS95 39C053._hap._tcp.local.', 'address': address, 'port': 51831, 'text': b'\x13md=HASS Bridge OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=Xfe5LQ=='},
    ]
    # The single TXT answer in the answer section (added at time 0).
    out.add_answer_at_time(DNSText('HASS Bridge W9DN 5B5CC5._hap._tcp.local.', const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_OTHER_TTL, b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A=='), 0)
    for record in additionals:
        # SRV, TXT, A, and NSEC additional records for each advertised service.
        out.add_additional_answer(DNSService(record['name'], const._TYPE_SRV, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_HOST_TTL, 0, 0, record['port'], record['name']))
        out.add_additional_answer(DNSText(record['name'], const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_OTHER_TTL, record['text']))
        out.add_additional_answer(DNSAddress(record['name'], const._TYPE_A, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_HOST_TTL, record['address']))
        out.add_additional_answer(DNSNsec(record['name'], const._TYPE_NSEC, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_OTHER_TTL, record['name'], [const._TYPE_TXT, const._TYPE_SRV]))
    return out.packets()
class Config():
(frozen=True)
class InvocationParams():
args: Tuple[(str, ...)]
plugins: Optional[Sequence[Union[(str, _PluggyPlugin)]]]
dir: Path
def __init__(self, *, args: Iterable[str], plugins: Optional[Sequence[Union[(str, _PluggyPlugin)]]], dir: Path) -> None:
object.__setattr__(self, 'args', tuple(args))
object.__setattr__(self, 'plugins', plugins)
object.__setattr__(self, 'dir', dir)
class ArgsSource(enum.Enum):
ARGS = enum.auto()
INVOCATION_DIR = enum.auto()
INCOVATION_DIR = INVOCATION_DIR
TESTPATHS = enum.auto()
def __init__(self, pluginmanager: PytestPluginManager, *, invocation_params: Optional[InvocationParams]=None) -> None:
from .argparsing import Parser, FILE_OR_DIR
if (invocation_params is None):
invocation_params = self.InvocationParams(args=(), plugins=None, dir=Path.cwd())
self.option = argparse.Namespace()
self.invocation_params = invocation_params
_a = FILE_OR_DIR
self._parser = Parser(usage=f'%(prog)s [options] [{_a}] [{_a}] [...]', processopt=self._processopt, _ispytest=True)
self.pluginmanager = pluginmanager
self.stash = Stash()
self._store = self.stash
self.trace = self.pluginmanager.trace.root.get('config')
self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook)
self._inicache: Dict[(str, Any)] = {}
self._override_ini: Sequence[str] = ()
self._opt2dest: Dict[(str, str)] = {}
self._cleanup: List[Callable[([], None)]] = []
self.pluginmanager.register(self, 'pytestconfig')
self._configured = False
self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager))
self.args_source = Config.ArgsSource.ARGS
self.args: List[str] = []
if TYPE_CHECKING:
from _pytest.cacheprovider import Cache
self.cache: Optional[Cache] = None
def rootpath(self) -> Path:
return self._rootpath
def inipath(self) -> Optional[Path]:
return self._inipath
def add_cleanup(self, func: Callable[([], None)]) -> None:
self._cleanup.append(func)
def _do_configure(self) -> None:
assert (not self._configured)
self._configured = True
with warnings.catch_warnings():
warnings.simplefilter('default')
self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
def _ensure_unconfigure(self) -> None:
if self._configured:
self._configured = False
self.hook.pytest_unconfigure(config=self)
self.hook.pytest_configure._call_history = []
while self._cleanup:
fin = self._cleanup.pop()
fin()
def get_terminal_writer(self) -> TerminalWriter:
terminalreporter: Optional[TerminalReporter] = self.pluginmanager.get_plugin('terminalreporter')
assert (terminalreporter is not None)
return terminalreporter._tw
def pytest_cmdline_parse(self, pluginmanager: PytestPluginManager, args: List[str]) -> 'Config':
try:
self.parse(args)
except UsageError:
if (getattr(self.option, 'version', False) or ('--version' in args)):
from _pytest.helpconfig import showversion
showversion(self)
elif (getattr(self.option, 'help', False) or ('--help' in args) or ('-h' in args)):
self._parser._getparser().print_help()
sys.stdout.write('\nNOTE: displaying only minimal help due to UsageError.\n\n')
raise
return self
def notify_exception(self, excinfo: ExceptionInfo[BaseException], option: Optional[argparse.Namespace]=None) -> None:
if (option and getattr(option, 'fulltrace', False)):
style: _TracebackStyle = 'long'
else:
style = 'native'
excrepr = excinfo.getrepr(funcargs=True, showlocals=getattr(option, 'showlocals', False), style=style)
res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
if (not any(res)):
for line in str(excrepr).split('\n'):
sys.stderr.write(('INTERNALERROR> %s\n' % line))
sys.stderr.flush()
def cwd_relative_nodeid(self, nodeid: str) -> str:
if (self.invocation_params.dir != self.rootpath):
fullpath = (self.rootpath / nodeid)
nodeid = bestrelpath(self.invocation_params.dir, fullpath)
return nodeid
def fromdictargs(cls, option_dict, args) -> 'Config':
config = get_config(args)
config.option.__dict__.update(option_dict)
config.parse(args, addopts=False)
for x in config.option.plugins:
config.pluginmanager.consider_pluginarg(x)
return config
def _processopt(self, opt: 'Argument') -> None:
for name in (opt._short_opts + opt._long_opts):
self._opt2dest[name] = opt.dest
if hasattr(opt, 'default'):
if (not hasattr(self.option, opt.dest)):
setattr(self.option, opt.dest, opt.default)
(trylast=True)
def pytest_load_initial_conftests(self, early_config: 'Config') -> None:
(args, args_source) = early_config._decide_args(args=early_config.known_args_namespace.file_or_dir, pyargs=early_config.known_args_namespace.pyargs, testpaths=early_config.getini('testpaths'), invocation_dir=early_config.invocation_params.dir, rootpath=early_config.rootpath, warn=False)
self.pluginmanager._set_initial_conftests(args=args, pyargs=early_config.known_args_namespace.pyargs, noconftest=early_config.known_args_namespace.noconftest, rootpath=early_config.rootpath, confcutdir=early_config.known_args_namespace.confcutdir, importmode=early_config.known_args_namespace.importmode)
def _initini(self, args: Sequence[str]) -> None:
(ns, unknown_args) = self._parser.parse_known_and_unknown_args(args, namespace=copy.copy(self.option))
(rootpath, inipath, inicfg) = determine_setup(ns.inifilename, (ns.file_or_dir + unknown_args), rootdir_cmd_arg=(ns.rootdir or None), invocation_dir=self.invocation_params.dir)
self._rootpath = rootpath
self._inipath = inipath
self.inicfg = inicfg
self._parser.extra_info['rootdir'] = str(self.rootpath)
self._parser.extra_info['inifile'] = str(self.inipath)
self._parser.addini('addopts', 'Extra command line options', 'args')
self._parser.addini('minversion', 'Minimally required pytest version')
self._parser.addini('required_plugins', 'Plugins that must be present for pytest to run', type='args', default=[])
self._override_ini = (ns.override_ini or ())
def _consider_importhook(self, args: Sequence[str]) -> None:
(ns, unknown_args) = self._parser.parse_known_and_unknown_args(args)
mode = getattr(ns, 'assertmode', 'plain')
if (mode == 'rewrite'):
import _pytest.assertion
try:
hook = _pytest.assertion.install_importhook(self)
except SystemError:
mode = 'plain'
else:
self._mark_plugins_for_rewrite(hook)
self._warn_about_missing_assertion(mode)
def _mark_plugins_for_rewrite(self, hook) -> None:
self.pluginmanager.rewrite_hook = hook
if os.environ.get('PYTEST_DISABLE_PLUGIN_AUTOLOAD'):
return
package_files = (str(file) for dist in importlib.metadata.distributions() if any(((ep.group == 'pytest11') for ep in dist.entry_points)) for file in (dist.files or []))
for name in _iter_rewritable_modules(package_files):
hook.mark_rewrite(name)
def _validate_args(self, args: List[str], via: str) -> List[str]:
self._parser._config_source_hint = via
try:
self._parser.parse_known_and_unknown_args(args, namespace=copy.copy(self.option))
finally:
del self._parser._config_source_hint
return args
def _decide_args(self, *, args: List[str], pyargs: bool, testpaths: List[str], invocation_dir: Path, rootpath: Path, warn: bool) -> Tuple[(List[str], ArgsSource)]:
if args:
source = Config.ArgsSource.ARGS
result = args
else:
if (invocation_dir == rootpath):
source = Config.ArgsSource.TESTPATHS
if pyargs:
result = testpaths
else:
result = []
for path in testpaths:
result.extend(sorted(glob.iglob(path, recursive=True)))
if (testpaths and (not result)):
if warn:
warning_text = 'No files were found in testpaths; consider removing or adjusting your testpaths configuration. Searching recursively from the current directory instead.'
self.issue_config_time_warning(PytestConfigWarning(warning_text), stacklevel=3)
else:
result = []
if (not result):
source = Config.ArgsSource.INVOCATION_DIR
result = [str(invocation_dir)]
return (result, source)
def _preparse(self, args: List[str], addopts: bool=True) -> None:
if addopts:
env_addopts = os.environ.get('PYTEST_ADDOPTS', '')
if len(env_addopts):
args[:] = (self._validate_args(shlex.split(env_addopts), 'via PYTEST_ADDOPTS') + args)
self._initini(args)
if addopts:
args[:] = (self._validate_args(self.getini('addopts'), 'via addopts config') + args)
self.known_args_namespace = self._parser.parse_known_args(args, namespace=copy.copy(self.option))
self._checkversion()
self._consider_importhook(args)
self.pluginmanager.consider_preparse(args, exclude_only=False)
if (not os.environ.get('PYTEST_DISABLE_PLUGIN_AUTOLOAD')):
self.pluginmanager.load_setuptools_entrypoints('pytest11')
self.pluginmanager.consider_env()
self.known_args_namespace = self._parser.parse_known_args(args, namespace=copy.copy(self.known_args_namespace))
self._validate_plugins()
self._warn_about_skipped_plugins()
if self.known_args_namespace.strict:
self.issue_config_time_warning(_pytest.deprecated.STRICT_OPTION, stacklevel=2)
if (self.known_args_namespace.confcutdir is None):
if (self.inipath is not None):
confcutdir = str(self.inipath.parent)
else:
confcutdir = str(self.rootpath)
self.known_args_namespace.confcutdir = confcutdir
try:
self.hook.pytest_load_initial_conftests(early_config=self, args=args, parser=self._parser)
except ConftestImportFailure as e:
if (self.known_args_namespace.help or self.known_args_namespace.version):
self.issue_config_time_warning(PytestConfigWarning(f'could not load initial conftests: {e.path}'), stacklevel=2)
else:
raise
(wrapper=True)
def pytest_collection(self) -> Generator[(None, object, object)]:
try:
return (yield)
finally:
self._validate_config_options()
def _checkversion(self) -> None:
import pytest
minver = self.inicfg.get('minversion', None)
if minver:
from packaging.version import Version
if (not isinstance(minver, str)):
raise pytest.UsageError(("%s: 'minversion' must be a single value" % self.inipath))
if (Version(minver) > Version(pytest.__version__)):
raise pytest.UsageError(("%s: 'minversion' requires pytest-%s, actual pytest-%s'" % (self.inipath, minver, pytest.__version__)))
def _validate_config_options(self) -> None:
for key in sorted(self._get_unknown_ini_keys()):
self._warn_or_fail_if_strict(f'''Unknown config option: {key}
''')
def _validate_plugins(self) -> None:
required_plugins = sorted(self.getini('required_plugins'))
if (not required_plugins):
return
from packaging.version import Version
from packaging.requirements import InvalidRequirement, Requirement
plugin_info = self.pluginmanager.list_plugin_distinfo()
plugin_dist_info = {dist.project_name: dist.version for (_, dist) in plugin_info}
missing_plugins = []
for required_plugin in required_plugins:
try:
req = Requirement(required_plugin)
except InvalidRequirement:
missing_plugins.append(required_plugin)
continue
if (req.name not in plugin_dist_info):
missing_plugins.append(required_plugin)
elif (not req.specifier.contains(Version(plugin_dist_info[req.name]), prereleases=True)):
missing_plugins.append(required_plugin)
if missing_plugins:
raise UsageError('Missing required plugins: {}'.format(', '.join(missing_plugins)))
def _warn_or_fail_if_strict(self, message: str) -> None:
if self.known_args_namespace.strict_config:
raise UsageError(message)
self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3)
def _get_unknown_ini_keys(self) -> List[str]:
parser_inicfg = self._parser._inidict
return [name for name in self.inicfg if (name not in parser_inicfg)]
def parse(self, args: List[str], addopts: bool=True) -> None:
assert (self.args == []), 'can only parse cmdline args at most once per Config object'
self.hook.pytest_addhooks.call_historic(kwargs=dict(pluginmanager=self.pluginmanager))
self._preparse(args, addopts=addopts)
self.hook.pytest_cmdline_preparse(config=self, args=args)
self._parser.after_preparse = True
try:
args = self._parser.parse_setoption(args, self.option, namespace=self.option)
(self.args, self.args_source) = self._decide_args(args=args, pyargs=self.known_args_namespace.pyargs, testpaths=self.getini('testpaths'), invocation_dir=self.invocation_params.dir, rootpath=self.rootpath, warn=True)
except PrintHelp:
pass
def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
    """Issue a warning during the configure stage.

    The warnings plugin may not be fully set up yet, so the warning is
    captured locally and replayed through the ``pytest_warning_recorded``
    hook instead of relying on the normal warning capture.

    :param warning: The warning instance to issue.
    :param stacklevel: stacklevel forwarded to ``warnings.warn``; also used
        to attribute the warning to the caller's frame.
    """
    if self.pluginmanager.is_blocked('warnings'):
        return
    cmdline_filters = (self.known_args_namespace.pythonwarnings or [])
    config_filters = self.getini('filterwarnings')
    with warnings.catch_warnings(record=True) as records:
        # Record this warning type unconditionally, then apply user filters.
        warnings.simplefilter('always', type(warning))
        apply_warning_filters(config_filters, cmdline_filters)
        warnings.warn(warning, stacklevel=stacklevel)
    if records:
        # Report the warning with the caller's file/line/function as location.
        frame = sys._getframe((stacklevel - 1))
        location = (frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name)
        self.hook.pytest_warning_recorded.call_historic(kwargs=dict(warning_message=records[0], when='config', nodeid='', location=location))
def addinivalue_line(self, name: str, line: str) -> None:
    """Append *line* to a line-list style ini option (e.g. ``markers``).

    The option must already be registered with a list-valued type.
    """
    values = self.getini(name)
    assert isinstance(values, list)
    # The cached list is mutated in place, so the addition is visible
    # to every later getini(name) call.
    values.append(line)
def getini(self, name: str):
    """Return the value of ini option *name*, computing and caching it on first use."""
    cache = self._inicache
    if name in cache:
        return cache[name]
    value = self._getini(name)
    cache[name] = value
    return value
def _getini_unknown_type(self, name: str, type: str, value: Union[(str, List[str])]):
msg = f'unknown configuration type: {type}'
raise ValueError(msg, value)
def _getini(self, name: str):
try:
(description, type, default) = self._parser._inidict[name]
except KeyError as e:
raise ValueError(f'unknown configuration value: {name!r}') from e
override_value = self._get_override_ini_value(name)
if (override_value is None):
try:
value = self.inicfg[name]
except KeyError:
return default
else:
value = override_value
if (type == 'paths'):
assert (self.inipath is not None)
dp = self.inipath.parent
input_values = (shlex.split(value) if isinstance(value, str) else value)
return [(dp / x) for x in input_values]
elif (type == 'args'):
return (shlex.split(value) if isinstance(value, str) else value)
elif (type == 'linelist'):
if isinstance(value, str):
return [t for t in map((lambda x: x.strip()), value.split('\n')) if t]
else:
return value
elif (type == 'bool'):
return _strtobool(str(value).strip())
elif (type == 'string'):
return value
elif (type is None):
return value
else:
return self._getini_unknown_type(name, type, value)
def _getconftest_pathlist(self, name: str, path: Path) -> Optional[List[Path]]:
    """Return the list of paths a conftest defines for *name*, or None.

    The value is looked up in the conftest closest to *path*; relative
    entries are resolved against that conftest module's directory.
    """
    try:
        (mod, relroots) = self.pluginmanager._rget_with_confmod(name, path)
    except KeyError:
        return None
    assert (mod.__file__ is not None)
    modpath = Path(mod.__file__).parent
    values: List[Path] = []
    for relroot in relroots:
        if isinstance(relroot, os.PathLike):
            relroot = Path(relroot)
        else:
            # Plain strings may use '/' separators; normalize for this OS.
            relroot = relroot.replace('/', os.sep)
        relroot = absolutepath((modpath / relroot))
        values.append(relroot)
    return values
def _get_override_ini_value(self, name: str) -> Optional[str]:
value = None
for ini_config in self._override_ini:
try:
(key, user_ini_value) = ini_config.split('=', 1)
except ValueError as e:
raise UsageError('-o/--override-ini expects option=value style (got: {!r}).'.format(ini_config)) from e
else:
if (key == name):
value = user_ini_value
return value
def getoption(self, name: str, default=notset, skip: bool=False):
    """Return a command line option value.

    :param name: Option name, also usable in its long/dashed form.
    :param default: Fallback returned when the option does not exist.
    :param skip: When True, skip the calling test if the option is absent
        or its value is None.
    :raises ValueError: If the option is unknown and no default was given.
    """
    # Translate a spelled-out option (e.g. '--verbose') to its dest name.
    dest = self._opt2dest.get(name, name)
    try:
        value = getattr(self.option, dest)
        if value is None and skip:
            # Treat an unset (None) value like a missing option for skipping.
            raise AttributeError(dest)
        return value
    except AttributeError as exc:
        if default is not notset:
            return default
        if skip:
            import pytest
            pytest.skip(f'no {dest!r} option found')
        raise ValueError(f'no option named {dest!r}') from exc
def getvalue(self, name: str, path=None):
    """Return the option value for *name*; *path* is accepted but unused."""
    return self.getoption(name)
def getvalueorskip(self, name: str, path=None):
    """Return the option value for *name*, skipping the test when it is unset; *path* is unused."""
    return self.getoption(name, skip=True)
# Verbosity category for assertion output (see get_verbosity()).
VERBOSITY_ASSERTIONS: Final = 'assertions'
# Sentinel ini value meaning "fall back to the global -v level".
_VERBOSITY_INI_DEFAULT: Final = 'auto'
def get_verbosity(self, verbosity_type: Optional[str]=None) -> int:
    """Return the verbosity level to use for *verbosity_type*.

    Falls back to the global ``-v`` count when no type is given, when no
    ``verbosity_<type>`` ini option is registered, or when that option is
    left at its 'auto' default.
    """
    global_level = self.option.verbose
    assert isinstance(global_level, int)
    if (verbosity_type is None):
        return global_level
    ini_name = Config._verbosity_ini_name(verbosity_type)
    if (ini_name not in self._parser._inidict):
        return global_level
    level = self.getini(ini_name)
    if (level == Config._VERBOSITY_INI_DEFAULT):
        return global_level
    return int(level)
def _verbosity_ini_name(verbosity_type: str) -> str:
return f'verbosity_{verbosity_type}'
def _add_verbosity_ini(parser: 'Parser', verbosity_type: str, help: str) -> None:
    """Register the ``verbosity_<type>`` ini option (string, default 'auto').

    NOTE(review): takes no self/cls — presumably a @staticmethod upstream
    whose decorator was lost in extraction; confirm against the original.
    """
    parser.addini(Config._verbosity_ini_name(verbosity_type), help=help, type='string', default=Config._VERBOSITY_INI_DEFAULT)
def _warn_about_missing_assertion(self, mode: str) -> None:
    """Warn when `assert` statements are not executed (e.g. python -O).

    :param mode: Assertion mode; 'plain' means pytest's rewriting is off,
        which makes the situation worse, hence the louder message.
    """
    if (not _assertion_supported()):
        if (mode == 'plain'):
            warning_text = 'ASSERTIONS ARE NOT EXECUTED and FAILING TESTS WILL PASS. Are you using python -O?'
        else:
            warning_text = 'assertions not in test modules or plugins will be ignored because assert statements are not executed by the underlying Python interpreter (are you using python -O?)\n'
        self.issue_config_time_warning(PytestConfigWarning(warning_text), stacklevel=3)
def _warn_about_skipped_plugins(self) -> None:
    """Emit a config-time warning for every plugin skipped during import."""
    for (module_name, msg) in self.pluginmanager.skipped_plugins:
        self.issue_config_time_warning(PytestConfigWarning(f'skipped plugin {module_name!r}: {msg}'), stacklevel=2)
class Effect6098(BaseEffect):
    """Caldari tactical destroyer hull bonus to missile launcher reload time.

    NOTE(review): ``handler`` takes no self — effect handlers in this
    framework are presumably collected as plain functions; confirm upstream.
    """
    type = 'passive'
    def handler(fit, ship, context, projectionRange, **kwargs):
        # Boost 'reloadTime' on every module requiring Missile Launcher
        # Operation, scaled by the hull attribute and the pilot's
        # Caldari Tactical Destroyer skill level.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Missile Launcher Operation')), 'reloadTime', ship.getModifiedItemAttr('shipBonusTacticalDestroyerCaldari2'), skill='Caldari Tactical Destroyer', **kwargs)
class Person(QObject):
    """Qt object holding a name string and a ShoeDescription.

    NOTE(review): the bare ``(str)`` / ``(ShoeDescription)`` expression lines
    look like remnants of property decorators (e.g. ``@Property(str)``) lost
    in extraction, and the paired name/name defs read as a getter/setter
    pair. As written the second ``name`` shadows the first. Confirm against
    the original file before relying on this class.
    """
    def __init__(self, parent=None):
        super(Person, self).__init__(parent)
        self._name = ''
        self._shoe = ShoeDescription()
    (str)
    def name(self):
        # Getter for the name property.
        return self._name
    def name(self, name):
        # Setter for the name property.
        self._name = name
    (ShoeDescription)
    def shoe(self):
        # Getter for the shoe property.
        return self._shoe
class Request(Awaitable[W]):
    """Awaitable handle for an in-flight collective on a process group.

    Carries the pending ``dist.Work`` plus per-collective context objects;
    ``_wait_impl`` completes the operation through an autograd-aware wait
    function supplied by whichever collective created this request.
    """
    def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
        super().__init__()
        self.pg: dist.ProcessGroup = pg
        # Pending async work handle; cleared after waiting.
        self.req: Optional[dist.Work] = None
        self.tensor: Optional[W] = None
        # Collective-specific metadata slots, filled in by the caller
        # (all-to-all info, quantization ctx, reduce-scatter / all-gather info).
        self.a2ai = None
        self.qcomm_ctx = None
        self.rsi = None
        self.agi = None
        # Autograd Function used to complete the collective in _wait_impl.
        self.wait_function = None
        # Tiny grad-requiring tensor so the wait participates in autograd.
        self.dummy_tensor: torch.Tensor = torch.empty(1, requires_grad=True, device=device)
    def _wait_impl(self) -> W:
        """Block until the collective finishes and return its result."""
        ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
        # Drop references so the work handle and buffer can be freed.
        self.req = None
        self.tensor = None
        return ret
def run_setup(setup_script, args):
    """Run a setup.py script sandboxed, as if invoked with argv *args*.

    Re-initializes the global working set (activating any newly visible
    distributions) and confines filesystem writes to the script's own
    directory via DirectorySandbox.
    """
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    with setup_context(setup_dir):
        try:
            # Emulate "python setup.py <args>" run from the script directory.
            sys.argv[:] = ([setup_script] + list(args))
            sys.path.insert(0, setup_dir)
            working_set.__init__()
            working_set.callbacks.append((lambda dist: dist.activate()))
            with DirectorySandbox(setup_dir):
                ns = dict(__file__=setup_script, __name__='__main__')
                _execfile(setup_script, ns)
        except SystemExit as v:
            # A falsy exit code means success; re-raise genuine failures.
            if (v.args and v.args[0]):
                raise
class CIFAR10Policy():
    """Apply one randomly chosen AutoAugment CIFAR10 sub-policy to an image.

    Each of the 25 sub-policies applies two operations, each with its own
    probability and magnitude index.
    """

    def __init__(self, fillcolor=(128, 128, 128)):
        # (p1, op1, magnitude1, p2, op2, magnitude2) for each sub-policy.
        specs = [
            (0.1, 'invert', 7, 0.2, 'contrast', 6),
            (0.7, 'rotate', 2, 0.3, 'translateX', 9),
            (0.8, 'sharpness', 1, 0.9, 'sharpness', 3),
            (0.5, 'shearY', 8, 0.7, 'translateY', 9),
            (0.5, 'autocontrast', 8, 0.9, 'equalize', 2),
            (0.2, 'shearY', 7, 0.3, 'posterize', 7),
            (0.4, 'color', 3, 0.6, 'brightness', 7),
            (0.3, 'sharpness', 9, 0.7, 'brightness', 9),
            (0.6, 'equalize', 5, 0.5, 'equalize', 1),
            (0.6, 'contrast', 7, 0.6, 'sharpness', 5),
            (0.7, 'color', 7, 0.5, 'translateX', 8),
            (0.3, 'equalize', 7, 0.4, 'autocontrast', 8),
            (0.4, 'translateY', 3, 0.2, 'sharpness', 6),
            (0.9, 'brightness', 6, 0.2, 'color', 8),
            (0.5, 'solarize', 2, 0.0, 'invert', 3),
            (0.2, 'equalize', 0, 0.6, 'autocontrast', 0),
            (0.2, 'equalize', 8, 0.6, 'equalize', 4),
            (0.9, 'color', 9, 0.6, 'equalize', 6),
            (0.8, 'autocontrast', 4, 0.2, 'solarize', 8),
            (0.1, 'brightness', 3, 0.7, 'color', 0),
            (0.4, 'solarize', 5, 0.9, 'autocontrast', 3),
            (0.9, 'translateY', 9, 0.7, 'translateY', 9),
            (0.9, 'autocontrast', 2, 0.8, 'solarize', 3),
            (0.8, 'equalize', 8, 0.1, 'invert', 3),
            (0.7, 'translateY', 9, 0.9, 'autocontrast', 1),
        ]
        self.policies = [SubPolicy(p1, op1, m1, p2, op2, m2, fillcolor) for (p1, op1, m1, p2, op2, m2) in specs]

    def __call__(self, img):
        """Return *img* transformed by one uniformly drawn sub-policy."""
        policy_idx = random.randint(0, (len(self.policies) - 1))
        return self.policies[policy_idx](img)

    def __repr__(self):
        return 'AutoAugment CIFAR10 Policy'
_session
def test_runningmeanstd():
    """RunningMeanStd over three batches must match numpy mean/std of their concat.

    Covers a 1-D and a 2-D case; U.initialize() (re)initializes the TF1
    variables backing each fresh RunningMeanStd.
    """
    for (x1, x2, x3) in [(np.random.randn(3), np.random.randn(4), np.random.randn(5)), (np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2))]:
        rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
        U.initialize()
        x = np.concatenate([x1, x2, x3], axis=0)
        # Reference statistics computed directly over all samples at once.
        ms1 = [x.mean(axis=0), x.std(axis=0)]
        rms.update(x1)
        rms.update(x2)
        rms.update(x3)
        ms2 = [rms.mean.eval(), rms.std.eval()]
        assert np.allclose(ms1, ms2)
class FC():
    """A fully-connected layer for a TF1 bootstrapped-ensemble network.

    Weights have shape (ensemble_size, input_dim, output_dim) so a single
    layer object evaluates every ensemble member at once. TF variables are
    created lazily by construct_vars(); hyperparameters are frozen once
    variables exist.
    """
    # Supported activation names -> TF ops; None means identity.
    _activations = {None: tf.identity, 'ReLU': tf.nn.relu, 'tanh': tf.tanh, 'sigmoid': tf.sigmoid, 'softmax': tf.nn.softmax, 'swish': (lambda x: (x * tf.sigmoid(x)))}
    def __init__(self, output_dim, input_dim=None, activation=None, weight_decay=None, ensemble_size=1):
        """Record hyperparameters only; no TF variables are created here."""
        (self.input_dim, self.output_dim) = (input_dim, output_dim)
        self.activation = activation
        self.weight_decay = weight_decay
        self.ensemble_size = ensemble_size
        self.variables_constructed = False
        (self.weights, self.biases) = (None, None)
        self.decays = None
    def __repr__(self):
        # Round-trippable: copy() reconstructs the layer via eval(repr(self)).
        return 'FC(output_dim={!r}, input_dim={!r}, activation={!r}, weight_decay={!r}, ensemble_size={!r})'.format(self.output_dim, self.input_dim, self.activation, self.weight_decay, self.ensemble_size)
    def get_model_vars(self, idx, sess):
        """Return copies of ensemble member *idx*'s weights/biases as numpy arrays."""
        (weights, biases) = sess.run([self.weights, self.biases])
        weight = weights[idx].copy()
        bias = biases[idx].copy()
        return {'weights': weight, 'biases': bias}
    def set_model_vars(self, idx, sess, variables):
        """Assign *variables* into ensemble member *idx*, running the ops now.

        NOTE(review): shadowed by the second set_model_vars defined just
        below — this 4-argument version is unreachable on instances as
        written; confirm upstream which definition callers expect.
        """
        for (attr, var) in variables.items():
            tensor = getattr(self, attr)
            op = tensor[idx].assign(var)
            sess.run(op)
    def set_model_vars(self, variables):
        """Build and return (without running) assign ops for all members."""
        ops = [getattr(self, attr).assign(var) for (attr, var) in variables.items()]
        return ops
    def reset(self, sess):
        """Re-run the variable initializers, re-randomizing the layer."""
        sess.run(self.weights.initializer)
        sess.run(self.biases.initializer)
    def compute_output_tensor(self, input_tensor):
        """Apply the layer to a rank-2 (shared) or rank-3 (per-member) input."""
        weights = self.weights
        if (len(input_tensor.shape) == 2):
            # Same (batch, input_dim) input broadcast to every ensemble member.
            raw_output = (tf.einsum('ij,ajk->aik', input_tensor, weights) + self.biases)
        elif ((len(input_tensor.shape) == 3) and (input_tensor.shape[0] == self.ensemble_size)):
            # One batch per ensemble member along the leading axis.
            raw_output = (tf.matmul(input_tensor, weights) + self.biases)
        else:
            raise ValueError('Invalid input dimension.')
        return FC._activations[self.activation](raw_output)
    def get_decays(self):
        """Return the L2 weight-decay loss terms (None before construct_vars)."""
        return self.decays
    def copy(self, sess=None):
        """Return an unconstructed layer with identical hyperparameters.

        NOTE(review): relies on eval(repr(self)); acceptable only because
        repr is built solely from this object's own fields.
        """
        new_layer = eval(repr(self))
        return new_layer
    def construct_vars(self):
        """Create the TF weight/bias variables (idempotent)."""
        if self.variables_constructed:
            return
        if ((self.input_dim is None) or (self.output_dim is None)):
            raise RuntimeError('Cannot construct variables without fully specifying input and output dimensions.')
        # Truncated-normal init scaled by fan-in; one slice per ensemble member.
        self.weights = tf.get_variable('FC_weights', shape=[self.ensemble_size, self.input_dim, self.output_dim], initializer=tf.truncated_normal_initializer(stddev=(1 / (2 * np.sqrt(self.input_dim)))))
        self.biases = tf.get_variable('FC_biases', shape=[self.ensemble_size, 1, self.output_dim], initializer=tf.constant_initializer(0.0))
        if (self.weight_decay is not None):
            self.decays = [tf.multiply(self.weight_decay, tf.nn.l2_loss(self.weights), name='weight_decay')]
        self.variables_constructed = True
    def get_vars(self):
        """Return the layer's TF variables."""
        return [self.weights, self.biases]
    def get_input_dim(self):
        return self.input_dim
    def set_input_dim(self, input_dim):
        # Dimensions are frozen once variables exist.
        if self.variables_constructed:
            raise RuntimeError('Variables already constructed.')
        self.input_dim = input_dim
    def get_output_dim(self):
        return self.output_dim
    def set_output_dim(self, output_dim):
        if self.variables_constructed:
            raise RuntimeError('Variables already constructed.')
        self.output_dim = output_dim
    def get_activation(self, as_func=True):
        """Return the activation as a callable (default) or by registered name."""
        if as_func:
            return FC._activations[self.activation]
        else:
            return self.activation
    def set_activation(self, activation):
        if self.variables_constructed:
            raise RuntimeError('Variables already constructed.')
        self.activation = activation
    def unset_activation(self):
        if self.variables_constructed:
            raise RuntimeError('Variables already constructed.')
        self.set_activation(None)
    def get_weight_decay(self):
        return self.weight_decay
    def set_weight_decay(self, weight_decay):
        """Update the decay coefficient; rebuilds the decay op if vars exist."""
        self.weight_decay = weight_decay
        if self.variables_constructed:
            if (self.weight_decay is not None):
                self.decays = [tf.multiply(self.weight_decay, tf.nn.l2_loss(self.weights), name='weight_decay')]
    def unset_weight_decay(self):
        self.set_weight_decay(None)
        if self.variables_constructed:
            self.decays = []
    def set_ensemble_size(self, ensemble_size):
        if self.variables_constructed:
            raise RuntimeError('Variables already constructed.')
        self.ensemble_size = ensemble_size
    def get_ensemble_size(self):
        return self.ensemble_size
def test_generator(game_enum):
    """Each game's generator must expose the expected component types."""
    from randovania.generator.base_patches_factory import BasePatchesFactory
    from randovania.generator.hint_distributor import HintDistributor
    from randovania.resolver.bootstrap import Bootstrap
    g = game_enum.generator
    assert isinstance(g.bootstrap, Bootstrap)
    assert isinstance(g.base_patches_factory, BasePatchesFactory)
    # The hint distributor is optional per game.
    if (g.hint_distributor is not None):
        assert isinstance(g.hint_distributor, HintDistributor)
def getDoomsdayMult(mod, tgt, distance, tgtSigRadius):
    """Return the damage multiplier a doomsday module applies to a target.

    0 when the target is beyond the module's max range, or when a titan
    superweapon is fired at a fitted sub-capital; otherwise the signature
    ratio tgtSigRadius / signatureRadius, capped at 1 (full damage when the
    module defines no damage signature).
    """
    maxRange = mod.maxRange
    if distance is not None and maxRange and distance > maxRange:
        return 0
    titanSuperEffects = {'superWeaponAmarr', 'superWeaponCaldari', 'superWeaponGallente', 'superWeaponMinmatar'}
    if titanSuperEffects.intersection(mod.item.effects):
        # Titan superweapons cannot hit fitted ships below capital size.
        if tgt.isFit and not tgt.item.ship.item.requiresSkill('Capital Ships'):
            return 0
    damageSig = mod.getModifiedItemAttr('signatureRadius')
    if not damageSig:
        return 1
    return min(1, tgtSigRadius / damageSig)
def save_import_snapshot_values(project, snapshots, checked):
    """Persist imported snapshots and their user-selected values into *project*.

    Every snapshot and value is expected to be unsaved (pk is None). Only
    values whose composite key appears in *checked* are stored; files are
    either copied from an existing file field or created from the raw
    ``file_import`` payload.
    """
    for snapshot in snapshots:
        assert (snapshot.pk is None)
        snapshot.project = project
        # copy_values=False: the snapshot's values are saved explicitly below.
        snapshot.save(copy_values=False)
        for value in snapshot.snapshot_values:
            if value.attribute:
                # Key mirrors the one offered to the user for selection.
                value_key = f'{value.attribute.uri}[{snapshot.snapshot_index}][{value.set_prefix}][{value.set_index}][{value.collection_index}]'
                if (value_key in checked):
                    assert (value.pk is None)
                    value.project = project
                    value.snapshot = snapshot
                    value.save()
                    if value.file:
                        value.copy_file(value.file_name, value.file)
                    else:
                        try:
                            name = value.file_import.get('name')
                            file = value.file_import.get('file')
                            value.file.save(name, file)
                        except AttributeError:
                            # No file_import payload; value simply has no file.
                            pass
def test_read_psm3_map_variables():
    """read_psm3 with map_variables=True should rename columns to pvlib names.

    Fix: the file was read twice with identical arguments and the first
    result discarded; the redundant call is removed.
    """
    (data, metadata) = psm3.read_psm3(MANUAL_TEST_DATA, map_variables=True)
    # Expected column order/names after PSM3 -> pvlib variable mapping.
    columns_mapped = ['Year', 'Month', 'Day', 'Hour', 'Minute', 'dhi', 'ghi', 'dni', 'ghi_clear', 'dhi_clear', 'dni_clear', 'Cloud Type', 'temp_dew', 'solar_zenith', 'Fill Flag', 'albedo', 'wind_speed', 'wind_direction', 'precipitable_water', 'relative_humidity', 'temp_air', 'pressure']
    assert_index_equal(data.columns, pd.Index(columns_mapped))
def test_replace_output_layer():
    """replace_layer_in_functional_model should swap the final Dense layer.

    Builds a 2->1->2 functional model, replaces the last Dense(2) with a
    Dense(3), round-trips through SavedModel, and checks the new output
    width survives serialization.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        saved_model_dir = os.path.join(tmp_dir, 'saved_model')
        inp = tf.keras.layers.Input(shape=(2,))
        x = tf.keras.layers.Dense(units=1)(inp)
        x = tf.keras.layers.Dense(units=2)(x)
        model = tf.keras.Model(inputs=inp, outputs=x, name='replace_output_layer_model')
        test_inp = np.array([[1, 2]])
        # One forward pass ensures the model is fully built before surgery.
        _ = model.predict(test_inp)
        new_layer = tf.keras.layers.Dense(units=3)
        old_dense = model.layers[2]
        replace_layer_in_functional_model(model, old_dense, new_layer)
        tf.keras.models.save_model(model, saved_model_dir)
        new_model = tf.keras.models.load_model(saved_model_dir)
        out = new_model(test_inp)
        assert (out.shape == [1, 3])
class UnavailableSession(Session):
    """Session stand-in used when a real session backend cannot be set up.

    Constructing it immediately raises with the configured reason; every
    Session operation is left unimplemented.
    """
    # Human-readable reason the session is unavailable; subclasses set this.
    session_issue: ClassVar[str]
    def __init__(self, *args, **kwargs) -> None:
        # Creating the session at all is an error; surface the reason.
        raise ValueError(self.session_issue)
    def _get_attribute(self, attr):
        raise NotImplementedError()
    def _set_attribute(self, attr, value):
        raise NotImplementedError()
    def close(self):
        raise NotImplementedError()
def test_annotation_based_injection_works_in_provider_methods():
    """Provider methods should receive dependencies based on type annotations.

    NOTE(review): the provide_* methods presumably carried decorators (e.g.
    @provider / @singleton) upstream that were lost in extraction — confirm.
    The assertions check that provide_str gets the bound int (42 -> '42')
    and that the object binding yields the same instance on repeated gets.
    """
    class MyModule(Module):
        def configure(self, binder):
            binder.bind(int, to=42)
        def provide_str(self, i: int) -> str:
            return str(i)
        def provide_object(self) -> object:
            return object()
    injector = Injector(MyModule)
    assert (injector.get(str) == '42')
    assert (injector.get(object) is injector.get(object))
def torch_full(*args, **kwargs):
    """Proxy for ``torch.full`` that tolerates meta-device fill values and
    ignores any ``device`` keyword.

    Fix: the original indexed ``args[1]`` unconditionally, raising
    IndexError whenever the fill value was passed as the ``fill_value``
    keyword (or fewer than two positionals were given). Both spellings are
    now handled.
    """
    args = list(args)
    # A fill value materialized on the meta device carries no data;
    # substitute a concrete placeholder so torch.full can execute.
    if (len(args) > 1 and isinstance(args[1], torch.Tensor)
            and args[1].device == torch.device('meta')):
        args[1] = 1
    fill_value = kwargs.get('fill_value')
    if isinstance(fill_value, torch.Tensor) and fill_value.device == torch.device('meta'):
        kwargs['fill_value'] = 1
    # Drop any requested device (e.g. 'meta') so the result is materialized.
    kwargs_without_device = dict(kwargs)
    kwargs_without_device.pop('device', None)
    return torch.full(*args, **kwargs_without_device)
class PeleeBranch2(nn.Module):
    """PeleeNet branch: a 1x1 bottleneck followed by two 3x3 conv blocks.

    Channel flow: in_channels -> mid_channels -> out_channels -> out_channels.
    """

    def __init__(self, in_channels, out_channels, mid_channels):
        super(PeleeBranch2, self).__init__()
        # 1x1 reduces channels before the costlier 3x3 stages.
        self.conv1 = conv1x1_block(in_channels=in_channels, out_channels=mid_channels)
        self.conv2 = conv3x3_block(in_channels=mid_channels, out_channels=out_channels)
        self.conv3 = conv3x3_block(in_channels=out_channels, out_channels=out_channels)

    def forward(self, x):
        """Run the three conv stages in sequence."""
        for stage in (self.conv1, self.conv2, self.conv3):
            x = stage(x)
        return x
class MMGNet():
    """Training/evaluation driver for the Mmgnet scene-graph model.

    Builds the train/validation datasets and the model from *config* and
    provides the train() loop, validation() metrics, checkpointing and
    tensorboard logging. All learning logic lives inside the Mmgnet model.
    """
    def __init__(self, config):
        self.config = config
        self.model_name = self.config.NAME
        self.mconfig = mconfig = config.MODEL
        self.exp = config.exp
        self.save_res = config.EVAL
        self.update_2d = config.update_2d
        dataset = None
        if (config.MODE == 'train'):
            if config.VERBOSE:
                print('build train dataset')
            self.dataset_train = build_dataset(self.config, split_type='train_scans', shuffle_objs=True, multi_rel_outputs=mconfig.multi_rel_outputs, use_rgb=mconfig.USE_RGB, use_normal=mconfig.USE_NORMAL)
            # Touch one item so any lazy dataset-side setup happens up front.
            self.dataset_train.__getitem__(0)
        if ((config.MODE == 'train') or (config.MODE == 'trace')):
            if config.VERBOSE:
                print('build valid dataset')
            self.dataset_valid = build_dataset(self.config, split_type='validation_scans', shuffle_objs=False, multi_rel_outputs=mconfig.multi_rel_outputs, use_rgb=mconfig.USE_RGB, use_normal=mconfig.USE_NORMAL)
            dataset = self.dataset_valid
        # NOTE(review): the lines below assume dataset_train/dataset_valid
        # exist, i.e. MODE is 'train' — confirm other modes construct these
        # attributes elsewhere before reaching here.
        num_obj_class = len(self.dataset_valid.classNames)
        num_rel_class = len(self.dataset_valid.relationNames)
        self.num_obj_class = num_obj_class
        self.num_rel_class = num_rel_class
        # Iterations per epoch and global iteration budgets (also mirrored
        # onto the shared config object for other components).
        self.total = self.config.total = (len(self.dataset_train) // self.config.Batch_Size)
        self.max_iteration = self.config.max_iteration = int(((float(self.config.MAX_EPOCHES) * len(self.dataset_train)) // self.config.Batch_Size))
        self.max_iteration_scheduler = self.config.max_iteration_scheduler = int(((float(100) * len(self.dataset_train)) // self.config.Batch_Size))
        self.model = Mmgnet(self.config, num_obj_class, num_rel_class).to(config.DEVICE)
        self.samples_path = os.path.join(config.PATH, self.model_name, self.exp, 'samples')
        self.results_path = os.path.join(config.PATH, self.model_name, self.exp, 'results')
        self.trace_path = os.path.join(config.PATH, self.model_name, self.exp, 'traced')
        self.writter = None
        if (not self.config.EVAL):
            # Tensorboard writer only for training runs.
            pth_log = os.path.join(config.PATH, 'logs', self.model_name, self.exp)
            self.writter = SummaryWriter(pth_log)
    def load(self, best=False):
        """Load model weights (the best checkpoint when *best* is True)."""
        return self.model.load(best)
    # NOTE(review): the bare `_grad()` lines below look like mangled
    # decorators (possibly @torch.no_grad()) — confirm against upstream.
    _grad()
    def data_processing_train(self, items):
        """Move a training batch to the device; points become (B, C, N)."""
        (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids) = items
        obj_points = obj_points.permute(0, 2, 1).contiguous()
        (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids) = self.cuda(obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids)
        return (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids)
    _grad()
    def data_processing_val(self, items):
        """Same preprocessing as data_processing_train, for validation batches."""
        (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids) = items
        obj_points = obj_points.permute(0, 2, 1).contiguous()
        (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids) = self.cuda(obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids)
        return (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids)
    def train(self):
        """Run the training loop until MAX_EPOCHES or max_iteration is hit.

        Optionally loads (and freezes) pretrained weights first, saves a
        checkpoint each epoch, and validates every VALID_INTERVAL epochs.
        """
        drop_last = True
        train_loader = CustomDataLoader(config=self.config, dataset=self.dataset_train, batch_size=self.config.Batch_Size, num_workers=self.config.WORKERS, drop_last=drop_last, shuffle=True, collate_fn=collate_fn_mmg)
        self.model.epoch = 1
        keep_training = True
        if (self.total == 1):
            print("No training data was provided! Check 'TRAIN_FLIST' value in the configuration file.")
            return
        progbar = op_utils.Progbar(self.total, width=20, stateful_metrics=['Misc/epo', 'Misc/it', 'Misc/lr'])
        loader = iter(train_loader)
        if (self.mconfig.use_pretrain != ''):
            # Load pretrained weights frozen, then report what still trains.
            self.model.load_pretrain_model(self.mconfig.use_pretrain, is_freeze=True)
            for (k, p) in self.model.named_parameters():
                if p.requires_grad:
                    print(f'Para {k} need grad')
        ' Train '
        while keep_training:
            if (self.model.epoch > self.config.MAX_EPOCHES):
                break
            print(('\n\nTraining epoch: %d' % self.model.epoch))
            for items in loader:
                self.model.train()
                (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids) = self.data_processing_train(items)
                logs = self.model.process_train(obj_points, obj_2d_feats, gt_class, descriptor, gt_rel_cls, edge_indices, batch_ids, with_log=True, weights_obj=self.dataset_train.w_cls_obj, weights_rel=self.dataset_train.w_cls_rel, ignore_none_rel=False)
                iteration = self.model.iteration
                logs += [('Misc/epo', int(self.model.epoch)), ('Misc/it', int(iteration)), ('lr', self.model.lr_scheduler.get_last_lr()[0])]
                # Verbose mode shows every metric; otherwise hide Loss/* lines.
                progbar.add(1, values=(logs if self.config.VERBOSE else [x for x in logs if (not x[0].startswith('Loss'))]))
                if (self.config.LOG_INTERVAL and ((iteration % self.config.LOG_INTERVAL) == 0)):
                    self.log(logs, iteration)
            if (self.model.iteration >= self.max_iteration):
                break
            # Fresh progress bar and loader for the next epoch.
            progbar = op_utils.Progbar(self.total, width=20, stateful_metrics=['Misc/epo', 'Misc/it'])
            loader = iter(train_loader)
            self.save()
            if (('VALID_INTERVAL' in self.config) and (self.config.VALID_INTERVAL > 0) and ((self.model.epoch % self.config.VALID_INTERVAL) == 0)):
                print('start validation...')
                rel_acc_val = self.validation()
                self.model.eva_res = rel_acc_val
                self.save()
            self.model.epoch += 1
    def cuda(self, *args):
        """Move every given tensor to the configured device."""
        return [item.to(self.config.DEVICE) for item in args]
    def log(self, logs, iteration):
        """Write non-Misc scalar metrics to tensorboard (training runs only)."""
        if ((self.writter is not None) and (not self.config.EVAL)):
            for i in logs:
                if (not i[0].startswith('Misc')):
                    self.writter.add_scalar(i[0], i[1], iteration)
    def save(self):
        """Persist the model checkpoint."""
        self.model.save()
    def validation(self, debug_mode=False):
        """Evaluate on the validation split and return mean recall@50 (3d).

        Accumulates top-k ranks for objects, predicates and triplets for
        both the 3d and 2d heads, reports accuracy at several k, and in
        EVAL mode also dumps raw rank arrays plus a result.txt.
        """
        val_loader = CustomDataLoader(config=self.config, dataset=self.dataset_valid, batch_size=1, num_workers=self.config.WORKERS, drop_last=False, shuffle=False, collate_fn=collate_fn_mmg)
        total = len(self.dataset_valid)
        progbar = op_utils.Progbar(total, width=20, stateful_metrics=['Misc/it'])
        print('=== start evaluation ===')
        self.model.eval()
        (topk_obj_list, topk_rel_list, topk_triplet_list, cls_matrix_list, edge_feature_list) = (np.array([]), np.array([]), np.array([]), [], [])
        (sub_scores_list, obj_scores_list, rel_scores_list) = ([], [], [])
        (topk_obj_2d_list, topk_rel_2d_list, topk_triplet_2d_list) = (np.array([]), np.array([]), np.array([]))
        for (i, items) in enumerate(val_loader, 0):
            (obj_points, obj_2d_feats, gt_class, gt_rel_cls, edge_indices, descriptor, batch_ids) = self.data_processing_val(items)
            with torch.no_grad():
                (top_k_obj, top_k_obj_2d, top_k_rel, top_k_rel_2d, tok_k_triplet, top_k_2d_triplet, cls_matrix, sub_scores, obj_scores, rel_scores) = self.model.process_val(obj_points, obj_2d_feats, gt_class, descriptor, gt_rel_cls, edge_indices, batch_ids, use_triplet=True)
            ' calculate metrics '
            topk_obj_list = np.concatenate((topk_obj_list, top_k_obj))
            topk_obj_2d_list = np.concatenate((topk_obj_2d_list, top_k_obj_2d))
            topk_rel_list = np.concatenate((topk_rel_list, top_k_rel))
            topk_rel_2d_list = np.concatenate((topk_rel_2d_list, top_k_rel_2d))
            topk_triplet_list = np.concatenate((topk_triplet_list, tok_k_triplet))
            topk_triplet_2d_list = np.concatenate((topk_triplet_2d_list, top_k_2d_triplet))
            if (cls_matrix is not None):
                cls_matrix_list.extend(cls_matrix)
                sub_scores_list.extend(sub_scores)
                obj_scores_list.extend(obj_scores)
                rel_scores_list.extend(rel_scores)
            # Running accuracies at k=1/5/10 (objects), 1/3/5 (relations), 50/100 (triplets).
            logs = [('/obj_cls_acc', (((topk_obj_list <= 1).sum() * 100) / len(topk_obj_list))), ('/obj_cls_2d_acc', (((topk_obj_2d_list <= 1).sum() * 100) / len(topk_obj_2d_list))), ('/obj_cls_acc', (((topk_obj_list <= 5).sum() * 100) / len(topk_obj_list))), ('/obj_cls_2d_acc', (((topk_obj_2d_list <= 5).sum() * 100) / len(topk_obj_2d_list))), ('/obj_cls_acc', (((topk_obj_list <= 10).sum() * 100) / len(topk_obj_list))), ('/obj_cls_2d_acc', (((topk_obj_2d_list <= 10).sum() * 100) / len(topk_obj_2d_list))), ('/rel_cls_acc', (((topk_rel_list <= 1).sum() * 100) / len(topk_rel_list))), ('/rel_cls_2d_acc', (((topk_rel_2d_list <= 1).sum() * 100) / len(topk_rel_2d_list))), ('/rel_cls_acc', (((topk_rel_list <= 3).sum() * 100) / len(topk_rel_list))), ('/rel_cls_2d_acc', (((topk_rel_2d_list <= 3).sum() * 100) / len(topk_rel_2d_list))), ('/rel_cls_acc', (((topk_rel_list <= 5).sum() * 100) / len(topk_rel_list))), ('/rel_cls_2d_acc', (((topk_rel_2d_list <= 5).sum() * 100) / len(topk_rel_2d_list))), ('/triplet_acc', (((topk_triplet_list <= 50).sum() * 100) / len(topk_triplet_list))), ('/triplet_2d_acc', (((topk_triplet_2d_list <= 50).sum() * 100) / len(topk_triplet_2d_list))), ('/triplet_acc', (((topk_triplet_list <= 100).sum() * 100) / len(topk_triplet_list))), ('/triplet_2d_acc', (((topk_triplet_2d_list <= 100).sum() * 100) / len(topk_triplet_2d_list)))]
            progbar.add(1, values=(logs if self.config.VERBOSE else [x for x in logs if (not x[0].startswith('Loss'))]))
        cls_matrix_list = np.stack(cls_matrix_list)
        sub_scores_list = np.stack(sub_scores_list)
        obj_scores_list = np.stack(obj_scores_list)
        rel_scores_list = np.stack(rel_scores_list)
        mean_recall = get_mean_recall(topk_triplet_list, cls_matrix_list)
        mean_recall_2d = get_mean_recall(topk_triplet_2d_list, cls_matrix_list)
        (zero_shot_recall, non_zero_shot_recall, all_zero_shot_recall) = get_zero_shot_recall(topk_triplet_list, cls_matrix_list, self.dataset_valid.classNames, self.dataset_valid.relationNames)
        if self.model.config.EVAL:
            # EVAL mode: dump raw rank arrays and write results to a file.
            save_path = os.path.join(self.config.PATH, 'results', self.model_name, self.exp)
            os.makedirs(save_path, exist_ok=True)
            np.save(os.path.join(save_path, 'topk_pred_list.npy'), topk_rel_list)
            np.save(os.path.join(save_path, 'topk_triplet_list.npy'), topk_triplet_list)
            np.save(os.path.join(save_path, 'cls_matrix_list.npy'), cls_matrix_list)
            np.save(os.path.join(save_path, 'sub_scores_list.npy'), sub_scores_list)
            np.save(os.path.join(save_path, 'obj_scores_list.npy'), obj_scores_list)
            np.save(os.path.join(save_path, 'rel_scores_list.npy'), rel_scores_list)
            f_in = open(os.path.join(save_path, 'result.txt'), 'w')
        else:
            # print(file=None) writes to stdout.
            f_in = None
        obj_acc_1 = (((topk_obj_list <= 1).sum() * 100) / len(topk_obj_list))
        obj_acc_2d_1 = (((topk_obj_2d_list <= 1).sum() * 100) / len(topk_obj_2d_list))
        obj_acc_5 = (((topk_obj_list <= 5).sum() * 100) / len(topk_obj_list))
        obj_acc_2d_5 = (((topk_obj_2d_list <= 5).sum() * 100) / len(topk_obj_2d_list))
        obj_acc_10 = (((topk_obj_list <= 10).sum() * 100) / len(topk_obj_list))
        obj_acc_2d_10 = (((topk_obj_2d_list <= 10).sum() * 100) / len(topk_obj_2d_list))
        rel_acc_1 = (((topk_rel_list <= 1).sum() * 100) / len(topk_rel_list))
        rel_acc_2d_1 = (((topk_rel_2d_list <= 1).sum() * 100) / len(topk_rel_2d_list))
        rel_acc_3 = (((topk_rel_list <= 3).sum() * 100) / len(topk_rel_list))
        rel_acc_2d_3 = (((topk_rel_2d_list <= 3).sum() * 100) / len(topk_rel_2d_list))
        rel_acc_5 = (((topk_rel_list <= 5).sum() * 100) / len(topk_rel_list))
        rel_acc_2d_5 = (((topk_rel_2d_list <= 5).sum() * 100) / len(topk_rel_2d_list))
        triplet_acc_50 = (((topk_triplet_list <= 50).sum() * 100) / len(topk_triplet_list))
        triplet_acc_2d_50 = (((topk_triplet_2d_list <= 50).sum() * 100) / len(topk_triplet_2d_list))
        triplet_acc_100 = (((topk_triplet_list <= 100).sum() * 100) / len(topk_triplet_list))
        triplet_acc_2d_100 = (((topk_triplet_2d_list <= 100).sum() * 100) / len(topk_triplet_2d_list))
        (rel_acc_mean_1, rel_acc_mean_3, rel_acc_mean_5) = self.compute_mean_predicate(cls_matrix_list, topk_rel_list)
        (rel_acc_2d_mean_1, rel_acc_2d_mean_3, rel_acc_2d_mean_5) = self.compute_mean_predicate(cls_matrix_list, topk_rel_2d_list)
        print(f'Eval: 3d obj : {obj_acc_1}', file=f_in)
        print(f'Eval: 2d obj : {obj_acc_2d_1}', file=f_in)
        print(f'Eval: 3d obj : {obj_acc_5}', file=f_in)
        print(f'Eval: 2d obj : {obj_acc_2d_5}', file=f_in)
        print(f'Eval: 3d obj : {obj_acc_10}', file=f_in)
        print(f'Eval: 2d obj : {obj_acc_2d_10}', file=f_in)
        print(f'Eval: 3d rel : {rel_acc_1}', file=f_in)
        print(f'Eval: 3d mean rel : {rel_acc_mean_1}', file=f_in)
        print(f'Eval: 2d rel : {rel_acc_2d_1}', file=f_in)
        print(f'Eval: 2d mean rel : {rel_acc_2d_mean_1}', file=f_in)
        print(f'Eval: 3d rel : {rel_acc_3}', file=f_in)
        print(f'Eval: 3d mean rel : {rel_acc_mean_3}', file=f_in)
        print(f'Eval: 2d rel : {rel_acc_2d_3}', file=f_in)
        print(f'Eval: 2d mean rel : {rel_acc_2d_mean_3}', file=f_in)
        print(f'Eval: 3d rel : {rel_acc_5}', file=f_in)
        print(f'Eval: 3d mean rel : {rel_acc_mean_5}', file=f_in)
        print(f'Eval: 2d rel : {rel_acc_2d_5}', file=f_in)
        print(f'Eval: 2d mean rel : {rel_acc_2d_mean_5}', file=f_in)
        print(f'Eval: 3d triplet : {triplet_acc_50}', file=f_in)
        print(f'Eval: 2d triplet : {triplet_acc_2d_50}', file=f_in)
        print(f'Eval: 3d triplet : {triplet_acc_100}', file=f_in)
        print(f'Eval: 2d triplet : {triplet_acc_2d_100}', file=f_in)
        print(f'Eval: 3d mean : {mean_recall[0]}', file=f_in)
        print(f'Eval: 2d mean : {mean_recall_2d[0]}', file=f_in)
        print(f'Eval: 3d mean : {mean_recall[1]}', file=f_in)
        print(f'Eval: 2d mean : {mean_recall_2d[1]}', file=f_in)
        print(f'Eval: 3d zero-shot : {zero_shot_recall[0]}', file=f_in)
        print(f'Eval: 3d zero-shot : {zero_shot_recall[1]}', file=f_in)
        print(f'Eval: 3d non-zero-shot : {non_zero_shot_recall[0]}', file=f_in)
        print(f'Eval: 3d non-zero-shot : {non_zero_shot_recall[1]}', file=f_in)
        print(f'Eval: 3d all-zero-shot : {all_zero_shot_recall[0]}', file=f_in)
        print(f'Eval: 3d all-zero-shot : {all_zero_shot_recall[1]}', file=f_in)
        if self.model.config.EVAL:
            f_in.close()
        logs = [('/obj_cls_acc', obj_acc_1), ('/obj_2d_cls_acc', obj_acc_2d_1), ('/obj_cls_acc', obj_acc_5), ('/obj_2d_cls_acc', obj_acc_2d_5), ('/obj_cls_acc', obj_acc_10), ('/obj_2d_cls_acc', obj_acc_2d_10), ('/rel_cls_acc', rel_acc_1), ('/rel_cls_acc_mean', rel_acc_mean_1), ('/rel_2d_cls_acc', rel_acc_2d_1), ('/rel_2d_cls_acc_mean', rel_acc_2d_mean_1), ('/rel_cls_acc', rel_acc_3), ('/rel_cls_acc_mean', rel_acc_mean_3), ('/rel_2d_cls_acc', rel_acc_2d_3), ('/rel_2d_cls_acc_mean', rel_acc_2d_mean_3), ('/rel_cls_acc', rel_acc_5), ('/rel_cls_acc_mean', rel_acc_mean_5), ('/rel_2d_cls_acc', rel_acc_2d_5), ('/rel_2d_cls_acc_mean', rel_acc_2d_mean_5), ('/triplet_acc', triplet_acc_50), ('/triplet_2d_acc', triplet_acc_2d_50), ('/triplet_acc', triplet_acc_100), ('/triplet_2d_acc', triplet_acc_2d_100), ('mean_', mean_recall[0]), ('mean_2d_', mean_recall_2d[0]), ('mean_', mean_recall[1]), ('mean_2d_', mean_recall_2d[1]), ('zero_shot_', zero_shot_recall[0]), ('zero_shot_', zero_shot_recall[1]), ('non_zero_shot_', non_zero_shot_recall[0]), ('non_zero_shot_', non_zero_shot_recall[1]), ('all_zero_shot_', all_zero_shot_recall[0]), ('all_zero_shot_', all_zero_shot_recall[1])]
        self.log(logs, self.model.iteration)
        return mean_recall[0]
    def compute_mean_predicate(self, cls_matrix_list, topk_pred_list):
        """Return class-balanced predicate accuracy (%) at k=1, 3 and 5.

        Groups predictions by ground-truth predicate id (last column of the
        cls matrix; -1 means no relation) and averages per-class hit rates.
        Note the hard-coded 26 predicate classes.
        """
        cls_dict = {}
        for i in range(26):
            cls_dict[i] = []
        for (idx, j) in enumerate(cls_matrix_list):
            if (j[(- 1)] != (- 1)):
                cls_dict[j[(- 1)]].append(topk_pred_list[idx])
        (predicate_mean_1, predicate_mean_3, predicate_mean_5) = ([], [], [])
        for i in range(26):
            l = len(cls_dict[i])
            if (l > 0):
                m_1 = ((np.array(cls_dict[i]) <= 1).sum() / len(cls_dict[i]))
                m_3 = ((np.array(cls_dict[i]) <= 3).sum() / len(cls_dict[i]))
                m_5 = ((np.array(cls_dict[i]) <= 5).sum() / len(cls_dict[i]))
                predicate_mean_1.append(m_1)
                predicate_mean_3.append(m_3)
                predicate_mean_5.append(m_5)
        predicate_mean_1 = np.mean(predicate_mean_1)
        predicate_mean_3 = np.mean(predicate_mean_3)
        predicate_mean_5 = np.mean(predicate_mean_5)
        return ((predicate_mean_1 * 100), (predicate_mean_3 * 100), (predicate_mean_5 * 100))
@patch('pypyr.steps.filewrite.Path')
def test_filewrite_binary(mock_path):
    """filewrite.run_step writes a binary payload with mode 'wb' and no encoding.

    The bare ``('pypyr.steps.filewrite.Path')`` expression was a stripped
    ``@patch`` decorator — restored (the ``mock_path`` parameter requires it).
    """
    context = Context({'k1': 'v1', 'fileWrite': {'path': '/arb/blah', 'payload': b'one\ntwo\nthree', 'binary': True}})
    # Capture what the step writes by routing the mocked file's write()
    # into an in-memory bytes buffer.
    with io.BytesIO() as out_bytes:
        with patch('pypyr.steps.filewrite.open', mock_open()) as mock_output:
            mock_output.return_value.write.side_effect = out_bytes.write
            filewrite.run_step(context)
        payload = out_bytes.getvalue()
    mock_path.assert_called_once_with('/arb/blah')
    mocked_path = mock_path.return_value
    # Parent dirs are created, file opened binary with encoding=None.
    mocked_path.parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
    mock_output.assert_called_once_with(mocked_path, 'wb', encoding=None)
    assert payload == b'one\ntwo\nthree'
class DockerLexer(RegexLexer):
    """Pygments lexer for Docker configuration files (Dockerfile)."""
    name = 'Docker'
    # The URL literal was truncated in this copy of the file; restored from
    # the upstream Pygments DockerLexer definition.
    url = 'https://docs.docker.com/reference/builder/'
    aliases = ['docker', 'dockerfile']
    filenames = ['Dockerfile', '*.docker']
    mimetypes = ['text/x-dockerfile-config']
    version_added = '2.0'
    _keywords = '(?:MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)'
    _bash_keywords = '(?:RUN|CMD|ENTRYPOINT|ENV|ARG|LABEL|ADD|COPY)'
    _lb = '(?:\\s*\\\\?\\s*)'  # dockerfile line continuation (optional backslash)
    flags = (re.IGNORECASE | re.MULTILINE)
    tokens = {'root': [
        ('#.*', Comment),
        ('(FROM)([ \\t]*)(\\S*)([ \\t]*)(?:(AS)([ \\t]*)(\\S*))?',
         bygroups(Keyword, Whitespace, String, Whitespace, Keyword, Whitespace, String)),
        (('(ONBUILD)(\\s+)(%s)' % (_lb,)),
         bygroups(Keyword, Whitespace, using(BashLexer))),
        (('(HEALTHCHECK)(\\s+)((%s--\\w+=\\w+%s)*)' % (_lb, _lb)),
         bygroups(Keyword, Whitespace, using(BashLexer))),
        # JSON-array ("exec") form of these instructions.
        (('(VOLUME|ENTRYPOINT|CMD|SHELL)(\\s+)(%s)(\\[.*?\\])' % (_lb,)),
         bygroups(Keyword, Whitespace, using(BashLexer), using(JsonLexer))),
        (('(LABEL|ENV|ARG)(\\s+)((%s\\w+=\\w+%s)*)' % (_lb, _lb)),
         bygroups(Keyword, Whitespace, using(BashLexer))),
        (('(%s|VOLUME)\\b(\\s+)(.*)' % _keywords),
         bygroups(Keyword, Whitespace, String)),
        (('(%s)(\\s+)' % (_bash_keywords,)),
         bygroups(Keyword, Whitespace)),
        # Everything else (incl. continued lines) is shell syntax.
        ('(.*\\\\\\n)*.+', using(BashLexer)),
    ]}
def load_data(data_path, dataset, images):
    """Load train/val/test JSON-lines description data for a dataset.

    Args:
        data_path: path prefix; files are ``<data_path><split>.json``.
        dataset: one of 'bird', 'cub', 'nabirds' — controls filtering.
        images: collection of known image names; records referencing
            unknown images are dropped (counted in ``dropdata``).

    Returns:
        Dict mapping split name to a shuffled list of records. For non-train
        splits, records with multiple descriptions are expanded to one
        record per description.
    """
    all_datas = {}
    for split in ['train', 'val', 'test']:
        datas = []
        dropdata = 0
        split_path = data_path + split + '.json'  # hoisted: built once per split
        if not os.path.exists(split_path):
            continue
        with open(split_path, 'r', encoding='utf-8') as fin:
            for line in fin:
                jterm = json.loads(line.strip())
                if dataset == 'bird':
                    if (jterm['img1'] in images) and (jterm['img2'] in images):
                        if split == 'train':
                            datas.append(jterm)
                        else:
                            # expand: one record per description
                            for des in jterm['description']:
                                datas.append({'img1': jterm['img1'],
                                              'img2': jterm['img2'],
                                              'description': des})
                    else:
                        dropdata += 1
                elif dataset == 'cub':
                    # keep only the basename of the image path
                    jterm['img'] = jterm['img'].split('/')[-1]
                    if jterm['img'] in images:
                        if split == 'train':
                            datas.append(jterm)
                        else:
                            for des in jterm['description']:
                                datas.append({'img': jterm['img'],
                                              'description': des})
                    else:
                        dropdata += 1
                elif dataset == 'nabirds':
                    datas.append(jterm)
        print('dataset:', dataset, 'Total True Label datas ', len(datas), 'drop ', dropdata, ' data')
        random.shuffle(datas)
        all_datas[split] = datas
    return all_datas
class Requirement():
    """Abstract base class for game-progression requirements.

    Concrete subclasses evaluate against collected resources and energy,
    simplify themselves, and convert to a RequirementSet.
    """

    def damage(self, current_resources: ResourceCollection, database: ResourceDatabase) -> int:
        """Damage incurred by satisfying this requirement."""
        raise NotImplementedError

    def satisfied(self, current_resources: ResourceCollection, current_energy: int, database: ResourceDatabase) -> bool:
        """Whether the requirement is met with these resources and energy."""
        raise NotImplementedError

    def patch_requirements(self, static_resources: ResourceCollection, damage_multiplier: float, database: ResourceDatabase) -> Requirement:
        """Return a copy with static resources folded in and damage scaled."""
        raise NotImplementedError

    def simplify(self, *, keep_comments: bool = False) -> Requirement:
        """Return a logically equivalent but simpler requirement tree."""
        raise NotImplementedError

    def as_set(self, database: ResourceDatabase) -> RequirementSet:
        raise NotImplementedError

    # Restored @classmethod (signature takes ``cls``). The bare '_cache'
    # residue suggests a caching decorator (e.g. functools.lru_cache) was
    # also applied originally — TODO confirm against upstream.
    @classmethod
    def trivial(cls) -> Requirement:
        """A requirement that is always satisfied (empty AND)."""
        from randovania.game_description.requirements.requirement_and import RequirementAnd
        return RequirementAnd([])

    @classmethod
    def impossible(cls) -> Requirement:
        """A requirement that can never be satisfied (empty OR)."""
        from randovania.game_description.requirements.requirement_or import RequirementOr
        return RequirementOr([])

    def __lt__(self, other: Requirement) -> bool:
        # Stable deterministic ordering by string form.
        return str(self) < str(other)

    def iterate_resource_requirements(self, database: ResourceDatabase) -> Iterator[ResourceRequirement]:
        """Yield every leaf ResourceRequirement in this tree."""
        raise NotImplementedError
class RepairCore(ABC):
    """Abstract engine that runs problem detectors and patch synthesizers
    against smart-contract sources until plausible patches are found."""

    # detector/synthesizer instances keyed by their name
    problemDectors: Dict[str, ProblemDetector]
    patchSynthesizers: Dict[str, PatchSynthesizer]

    # Restored @abstractmethod (body is ``pass`` in an ABC).
    # NOTE(review): may originally also have been a @property — confirm.
    @abstractmethod
    def name(self) -> str:
        """Human-readable name of this repair core."""
        pass

    def __init__(self, clsProblemDectors: Iterable[Type[ProblemDetector]], clsPatchSynthesizers: Iterable[Type[PatchSynthesizer]], detectorArgs: Optional[Dict[str, str]] = None, synthesizerArgs: Optional[Dict[str, str]] = None, repaircoreArgs: Optional[Dict[str, str]] = None):
        self.problemDectors = self.instantiateProblemDectors(clsProblemDectors, detectorArgs)
        self.patchSynthesizers = self.instantiatePatchSynthesizer(clsPatchSynthesizers, synthesizerArgs)

    @abstractmethod
    async def repair(self, path_source: Sequence[Path], targetContractName: str, targetedVul: Optional[List[str]] = None, targetedLoc: Optional[Sequence[CodeRange]] = None, num_plausible: int = 1, repair_target: RepairTarget = RepairTarget(), maxTrial: Optional[int] = None, **extra_args) -> Sequence[PlausiblePatch]:
        """Run the repair loop and return plausible patches."""
        pass

    # Restored @staticmethod: no ``self`` parameter, and __init__ calls this
    # as ``self.instantiateProblemDectors(...)`` — without the decorator the
    # instance would be bound to ``clsProblemDectors``.
    @staticmethod
    def instantiateProblemDectors(clsProblemDectors: Iterable[Type[ProblemDetector]], detectorArgs: Optional[dict] = None) -> Dict[str, ProblemDetector]:
        """Instantiate each detector class, keyed by its name."""
        if detectorArgs is None:
            detectorArgs = {}
        return {pd.name: pd for pd in (clsPd(args=detectorArgs) for clsPd in clsProblemDectors)}

    @staticmethod
    def instantiatePatchSynthesizer(clsPatchSynthesizers: Iterable[Type[PatchSynthesizer]], synthesizerArgs: Optional[dict] = None) -> Dict[str, PatchSynthesizer]:
        """Instantiate each synthesizer class, keyed by its name."""
        if synthesizerArgs is None:
            synthesizerArgs = {}
        return {ps.name: ps for ps in (clsPs(args=synthesizerArgs) for clsPs in clsPatchSynthesizers)}

    @staticmethod
    def targetVulDetected(rst: ProblemDetectorResults, targetedVul: Optional[Sequence[str]] = None) -> bool:
        """True if any detected vulnerability matches the targeted list."""
        return any(vul.isTargeted(targetedVul) for vul in chain.from_iterable(rst.values()))

    async def detectPatch(self, patch: PatchInfo, targetContractName: str, targetLocations: Optional[Sequence[CodeRange]] = None, fastFail: bool = False, targetedVul: Optional[Sequence[str]] = None, not_skippable_detectors: Sequence[str] = ()) -> ProblemDetectorResults:
        """Run all detectors on a patch, optionally aborting skippable ones
        as soon as a targeted vulnerability is found (fastFail)."""

        class FastFailException(Exception):
            pass

        async def coroTargetVulDetect(detectCoro: Awaitable[ProblemDetectorResult], fastFail: bool):
            rst = await detectCoro
            # NOTE(review): the 'detector:' key (with colon) looks like a typo,
            # but it is only used for this transient wrapper — confirm.
            rst_ = ProblemDetectorResults({'detector:': rst})
            if (self.targetVulDetected(rst_, targetedVul) is not False) and fastFail:
                raise FastFailException()
            return rst

        async def helper(detectorName, detector, fastFail: bool):
            return (detectorName, (await coroTargetVulDetect(detector.detect((patch.PatchedFilePath,), targetContractName, targetLocations, targetedVul, fastFail), fastFail)))

        # Split detectors: non-skippable ones always run to completion.
        skippable_problemDetectors = {k: self.problemDectors[k] for k in self.problemDectors if k not in not_skippable_detectors}
        non_skippable_problemDetectors = {k: self.problemDectors[k] for k in self.problemDectors if k in not_skippable_detectors}
        non_skippable_feature = asyncio.gather(*(helper(detectorName, detector, False) for (detectorName, detector) in non_skippable_problemDetectors.items()))
        skippable_feature = asyncio.gather(*(helper(detectorName, detector, fastFail) for (detectorName, detector) in skippable_problemDetectors.items()))

        async def skippableAsyncFn_helper():
            try:
                return await skippable_feature
            except FastFailException:
                logger.debug('Some detector detected target vulnerability and fastFaill specified, cancelling continuation of evaluation of this patch.')
                skippable_feature.cancel()
                return ('fastfail', (DetectedVulnerability_FastFail(),))

        rsts = await asyncio.gather(skippableAsyncFn_helper(), non_skippable_feature)
        return ProblemDetectorResults(chain.from_iterable((rsts[0], rsts[1])))
class TelemetryData(MutableMapping):
    """Dict-backed mapping that wraps nested plain dicts in TelemetryData
    on access, so nested telemetry sections share the same interface."""

    def __init__(self, *args, **kwargs):
        self.store = dict()
        self.update(dict(*args, **kwargs))

    def __getitem__(self, key):
        item = self.store[self.__keytransform__(key)]
        # Re-wrap nested dicts so lookups keep returning TelemetryData.
        return self.__class__(item) if isinstance(item, dict) else item

    def __setitem__(self, key, value):
        self.store[self.__keytransform__(key)] = value

    def __delitem__(self, key):
        del self.store[self.__keytransform__(key)]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __keytransform__(self, key):
        # Hook point for subclasses; identity by default.
        return key
class CoreAudioSource(StreamingSource):
    """Streaming audio source backed by macOS Core Audio (ExtAudioFile).

    Decodes a file (by filesystem path or file-like object) into signed
    16-bit packed interleaved linear PCM via the ExtAudioFile converter.
    """

    def __init__(self, filename, file=None):
        self._bl = None          # reusable AudioBufferList for reads
        self._file = file
        self._deleted = False
        self._file_obj = None
        self._audfile = None     # AudioFileID handle (memory-backed path only)
        self._audref = None      # ExtAudioFileRef handle
        audref = ExtAudioFileRef()
        if file is None:
            # Open directly from the filesystem path.
            fn_str = CFSTR(filename)
            url_ref = cf.CFURLCreateWithFileSystemPath(None, fn_str, kCFURLPOSIXPathStyle, False)
            err_check(ca.ExtAudioFileOpenURL(url_ref, byref(audref)))
        else:
            # Wrap the in-memory file object with read/size callbacks.
            # NOTE(review): assigns ``self.file_obj`` while ``self._file_obj``
            # above stays None — looks like an attribute-name mismatch; kept
            # as-is because other code may rely on either name. Confirm.
            self.file_obj = MemoryFileObject(file)
            self._audfile = AudioFileID()
            err_check(ca.AudioFileOpenWithCallbacks(None, self.file_obj.read_func, None, self.file_obj.getsize_func, None, 0, byref(self._audfile)))
            err_check(ca.ExtAudioFileWrapAudioFileID(self._audfile, False, byref(audref)))
        self._audref = audref
        # Query the file's native format, then install a PCM client format so
        # reads come back converted.
        format_info = AudioStreamBasicDescription()
        size = c_uint32(sizeof(format_info))
        err_check(ca.ExtAudioFileGetProperty(self._audref, kExtAudioFileProperty_FileDataFormat, byref(size), byref(format_info)))
        self.convert_desc = self.convert_format(format_info)
        err_check(ca.ExtAudioFileSetProperty(self._audref, kExtAudioFileProperty_ClientDataFormat, sizeof(self.convert_desc), byref(self.convert_desc)))
        length = c_long()
        size = c_uint32(sizeof(format_info))
        err_check(ca.ExtAudioFileGetProperty(self._audref, kExtAudioFileProperty_FileLengthFrames, byref(size), byref(length)))
        self.audio_format = AudioFormat(channels=self.convert_desc.mChannelsPerFrame, sample_size=self.convert_desc.mBitsPerChannel, sample_rate=int(self.convert_desc.mSampleRate))
        self._num_frames = length.value
        self._bytes_per_frame = self.convert_desc.mBytesPerFrame
        self._duration = self._num_frames / self.convert_desc.mSampleRate
        self._duration_per_frame = self._duration / self._num_frames

    # Restored @staticmethod: no ``self`` parameter, and __init__ invokes it
    # as ``self.convert_format(format_info)`` — without the decorator the
    # instance would be bound to ``original_desc``.
    @staticmethod
    def convert_format(original_desc, bitdepth=16):
        """Build a packed signed-integer linear PCM description matching
        *original_desc*'s sample rate and channel count."""
        adesc = AudioStreamBasicDescription()
        adesc.mSampleRate = original_desc.mSampleRate
        adesc.mFormatID = kAudioFormatLinearPCM
        adesc.mFormatFlags = (kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked)
        adesc.mChannelsPerFrame = original_desc.mChannelsPerFrame
        adesc.mBitsPerChannel = bitdepth
        adesc.mBytesPerPacket = ((original_desc.mChannelsPerFrame * adesc.mBitsPerChannel) // 8)
        adesc.mFramesPerPacket = 1  # PCM: one frame per packet
        adesc.mBytesPerFrame = adesc.mBytesPerPacket
        return adesc

    def __del__(self):
        # Release the Python file first, then the Core Audio handles.
        if self._file:
            self._file.close()
            self._file = None
        if self._audfile:
            err_check(ca.AudioFileClose(self._audfile))
            self._audfile = None
        if self._audref:
            err_check(ca.ExtAudioFileDispose(self._audref))
            self._audref = None

    def get_audio_data(self, num_bytes, compensation_time=0.0):
        """Read up to *num_bytes* of converted PCM; None at end of stream."""
        num_frames = c_uint32(num_bytes // self.convert_desc.mBytesPerFrame)
        if not self._bl:
            # Lazily build a single-buffer AudioBufferList backed by a
            # ctypes string buffer.
            buffer = create_string_buffer(num_bytes)
            self._bl = AudioBufferList()
            self._bl.mNumberBuffers = 1
            self._bl.mBuffers[0].mNumberChannels = self.convert_desc.mChannelsPerFrame
            self._bl.mBuffers[0].mDataByteSize = num_bytes
            self._bl.mBuffers[0].mData = cast(buffer, c_void_p)
        while True:
            ca.ExtAudioFileRead(self._audref, byref(num_frames), byref(self._bl))
            size = self._bl.mBuffers[0].mDataByteSize
            if not size:
                break
            data = cast(self._bl.mBuffers[0].mData, POINTER(c_char))
            slice = data[:size]
            # NOTE(review): the duration argument divides bytes by sample
            # rate only (ignores bytes-per-frame) — confirm intended.
            return AudioData(slice, size, 0.0, (size / self.audio_format.sample_rate), [])
        return None

    def seek(self, timestamp):
        """Seek to *timestamp* seconds, clamped to [0, duration]."""
        self._bl = None  # drop the read buffer; it may hold stale data
        timestamp = max(0.0, min(timestamp, self._duration))
        position = int(timestamp / self._duration_per_frame)
        ca.ExtAudioFileSeek(self._audref, position)
def get_example_models():
    """Yield the model object from every example module under ../examples.

    Skips ``run_*`` scripts and dunder files; on Python 2 also skips the
    'localfunc' example.
    """
    example_dir = os.path.join(os.path.dirname(__file__), '..', 'examples')
    for entry in os.listdir(example_dir):
        if not entry.endswith('.py'):
            continue
        if entry.startswith('run_') or entry.startswith('__'):
            continue
        modelname = entry[:-3]
        if modelname == 'localfunc' and sys.version_info.major < 3:
            continue
        module = importlib.import_module('pysb.examples.' + modelname)
        # Importing an example disables self-export; re-enable for the next one.
        SelfExporter.do_export = True
        yield module.model
class PandaRealSensed435Config(PandaDefaultConfig):
    """Panda arm config with a wrist-mounted RealSense D435 hand camera."""

    def __init__(self) -> None:
        super().__init__()
        self.urdf_path = '{PACKAGE_ASSET_DIR}/descriptions/panda_v3.urdf'

    # Restored @property: sibling configs expose camera settings as a
    # property rather than a method — confirm against the base class.
    @property
    def cameras(self):
        """128x128 hand camera attached to the 'camera_link' actor."""
        return CameraConfig(uid='hand_camera', p=[0, 0, 0], q=[1, 0, 0, 0], width=128, height=128, fov=1.57, near=0.01, far=10, actor_uid='camera_link')
class Tokenizer():
    """Static helpers for building dictionaries and binarizing text files.

    Restored @staticmethod on all three methods: none takes ``self`` and
    ``binarize`` invokes ``Tokenizer.tokenize`` through the class.
    """

    @staticmethod
    def add_file_to_dictionary(filename, dict, tokenize, append_eos=True):
        """Add every token of *filename* (plus EOS per line) to *dict*."""
        with open(filename, 'r') as f:
            for line in f:
                for word in tokenize(line):
                    dict.add_symbol(word)
                if append_eos:
                    dict.add_symbol(dict.eos_word)

    @staticmethod
    def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False):
        """Tokenize *filename* line by line, passing each id tensor to
        *consumer*; returns stats about sequences, tokens and <unk> words."""
        nseq, ntok = 0, 0
        replaced = Counter()

        def replaced_consumer(word, idx):
            # Track words that mapped to <unk> (excluding the literal unk word).
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])

        with open(filename, 'r') as f:
            for line in f:
                ids = Tokenizer.tokenize(line=line, dict=dict, tokenize=tokenize, add_if_not_exist=False, consumer=(replaced_consumer if hasattr(dict, 'unk_index') else None), append_eos=append_eos, reverse_order=reverse_order)
                nseq += 1
                consumer(ids)
                ntok += len(ids)
        return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': len(replaced)}

    @staticmethod
    def tokenize(line, dict, tokenize=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
        """Convert a text line to an IntTensor of symbol ids.

        When *add_if_not_exist* is False, unknown words map to the
        dictionary's unk index; *consumer* (if given) observes each
        (word, idx) pair.
        """
        words = tokenize(line)
        if reverse_order:
            words = list(reversed(words))
        nwords = len(words)
        ids = torch.IntTensor((nwords + 1) if append_eos else nwords)
        for i, word in enumerate(words):
            if add_if_not_exist:
                idx = dict.add_symbol(word)
            else:
                idx = dict.index(word)
            if consumer is not None:
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = dict.eos_index
        return ids
class Effect3961(BaseEffect):
    """Passive subsystem bonus: boosts armor repair amount of modules that
    require the Repair Systems skill, scaled by the Gallente Defensive
    Systems skill."""
    type = 'passive'

    # Restored @staticmethod: handlers receive (fit, module, ...) with no self.
    @staticmethod
    def handler(fit, module, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Repair Systems')), 'armorDamageAmount', module.getModifiedItemAttr('subsystemBonusGallenteDefensive'), skill='Gallente Defensive Systems', **kwargs)
def main(filename, save):
    """Read hit coordinates from *filename*, derive the hit area for each
    row via ``to_area``, and write the result to *save* as CSV."""
    raw = pd.read_csv(filename)
    raw = raw[['hit_x', 'hit_y']]
    result = pd.DataFrame([])
    result['hit_x'] = raw['hit_x']
    result['hit_y'] = raw['hit_y']
    result['hit_area'] = pd.Series(to_area(raw['hit_x'], raw['hit_y']))
    result.to_csv(save, index=False, encoding='utf-8')
def _build_hint(parser: argparse.ArgumentParser, arg_action: argparse.Action) -> str:
suppress_hint = arg_action.get_suppress_tab_hint()
if (suppress_hint or (arg_action.help == argparse.SUPPRESS)):
return ''
else:
formatter = parser._get_formatter()
formatter.start_section('Hint')
formatter.add_argument(arg_action)
formatter.end_section()
return formatter.format_help() |
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, with an
    identity or projection shortcut. ``fake_relu`` routes the final
    activation through FakeReLU (identity gradient) for interpretability."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        width = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        # Projection shortcut only when spatial size or channels change.
        if stride != 1 or in_planes != width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(width),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x, fake_relu=False):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + self.shortcut(x)
        if fake_relu:
            return FakeReLU.apply(y)
        return F.relu(y)
def default_argument_parser():
    """Build the default CLI parser for Detectron2-style training scripts."""
    parser = argparse.ArgumentParser(description='Detectron2 Training')
    parser.add_argument('--config-file', default='configs/smoke_gn_vector.yaml',
                        metavar='FILE', help='path to config file')
    parser.add_argument('--eval-only', action='store_true',
                        help='perform evaluation only')
    parser.add_argument('--ckpt', default=None,
                        help='The path to the checkpoint for test, default is the latest checkpoint.')
    parser.add_argument('--num-gpus', type=int, default=1,
                        help='number of gpus *per machine*')
    parser.add_argument('--num-machines', type=int, default=1)
    parser.add_argument('--machine-rank', type=int, default=0,
                        help='the rank of this machine (unique per machine)')
    # Derive a per-user default port in [49152, 65536) to avoid collisions
    # between users on a shared machine.
    default_port = (2 ** 15) + (2 ** 14) + (hash(os.getuid()) % (2 ** 14))
    parser.add_argument('--dist-url', default='tcp://127.0.0.1:{}'.format(default_port))
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    return parser
class VoteStorage():
    """Session-backed record of which objects the current visitor voted on.

    Voted primary keys are kept in ``request.session[name]`` so a visitor
    cannot vote twice; ``rate`` is the vote weight applied to instances.
    """

    def __init__(self, request, name, rate):
        self.request = request
        self.name = name
        # pks already voted on in this session (empty list if none yet)
        self.items = request.session.get(name, [])
        self.rate = rate

    def _persist(self):
        # Write the pk list back so the session change is saved.
        self.request.session[self.name] = self.items

    def add(self, instance):
        """Record a vote for *instance* and apply the rate to it."""
        self.items.append(instance.pk)
        self._persist()
        instance.update_vote(self.rate)

    def remove(self, instance):
        """Withdraw the vote for *instance*, applying the inverse rate."""
        self.items.remove(instance.pk)
        self._persist()
        instance.update_vote(-self.rate)

    def filter(self, pk):
        """Return a queryset-like object answering ``exists()`` for *pk*."""
        return self._Filter(pk, self.items)

    class _Filter():
        """Minimal stand-in mimicking ``QuerySet.filter(...).exists()``."""

        def __init__(self, pk, items):
            self.pk = pk
            self.items = items

        def exists(self):
            return int(self.pk) in self.items
class StyleElementDescription():
    """Immutable description of one style element: a name, a human-readable
    description, and a default StyleFormat.

    Restored @property on the accessors: ``__repr__`` interpolates
    ``self.name`` and ``self.defaultFormat`` as values, so they must be
    properties rather than plain methods.
    """

    def __init__(self, name, description, defaultFormat):
        self._name = name
        self._description = description
        self._defaultFormat = StyleFormat(defaultFormat)

    def __repr__(self):
        return '<"%s": "%s">' % (self.name, self.defaultFormat)

    @property
    def name(self):
        """Display name of this style element."""
        return self._name

    @property
    def key(self):
        """Normalized lookup key: name lower-cased with spaces removed."""
        return self._name.replace(' ', '').lower()

    @property
    def description(self):
        """Human-readable description text."""
        return self._description

    @property
    def defaultFormat(self):
        """Default StyleFormat for this element."""
        return self._defaultFormat
def test_classical_truth_table():
    """CNOT's classical action matches the XOR truth table and rejects non-bits."""
    expected = [((0, 0), (0, 0)), ((0, 1), (0, 1)), ((1, 0), (1, 1)), ((1, 1), (1, 0))]
    truth_table = []
    for ctrl_in, tgt_in in itertools.product([0, 1], repeat=2):
        out_c, out_t = CNOT().call_classically(ctrl=ctrl_in, target=tgt_in)
        truth_table.append(((ctrl_in, tgt_in), (out_c, out_t)))
    assert truth_table == expected
    # Inputs outside {0, 1} are rejected.
    with pytest.raises(ValueError):
        CNOT().call_classically(ctrl=2, target=0)
class FeaturesManager():
    """Registry mapping model types to their supported ONNX export features
    and resolving the right AutoModel class per framework.

    Restored @staticmethod on the helper methods: none takes ``self`` and
    all are invoked through the class (``FeaturesManager.xxx``).
    """

    # feature name -> AutoModel class, per framework; populated only when
    # the corresponding framework is importable.
    _TASKS_TO_AUTOMODELS = {}
    _TASKS_TO_TF_AUTOMODELS = {}
    if is_torch_available():
        _TASKS_TO_AUTOMODELS = {'default': AutoModel, 'masked-lm': AutoModelForMaskedLM, 'causal-lm': AutoModelForCausalLM, 'seq2seq-lm': AutoModelForSeq2SeqLM, 'sequence-classification': AutoModelForSequenceClassification, 'token-classification': AutoModelForTokenClassification, 'multiple-choice': AutoModelForMultipleChoice, 'question-answering': AutoModelForQuestionAnswering, 'image-classification': AutoModelForImageClassification}
    if is_tf_available():
        _TASKS_TO_TF_AUTOMODELS = {'default': TFAutoModel, 'masked-lm': TFAutoModelForMaskedLM, 'causal-lm': TFAutoModelForCausalLM, 'seq2seq-lm': TFAutoModelForSeq2SeqLM, 'sequence-classification': TFAutoModelForSequenceClassification, 'token-classification': TFAutoModelForTokenClassification, 'multiple-choice': TFAutoModelForMultipleChoice, 'question-answering': TFAutoModelForQuestionAnswering}

    # model type -> {feature name -> OnnxConfig constructor}
    _SUPPORTED_MODEL_TYPE = {
        'albert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=AlbertOnnxConfig),
        'bart': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', 'sequence-classification', 'question-answering', onnx_config_cls=BartOnnxConfig),
        'mbart': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', 'sequence-classification', 'question-answering', onnx_config_cls=MBartOnnxConfig),
        'bert': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=BertOnnxConfig),
        'ibert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=IBertOnnxConfig),
        'camembert': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=CamembertOnnxConfig),
        'distilbert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=DistilBertOnnxConfig),
        'flaubert': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=FlaubertOnnxConfig),
        'marian': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', 'causal-lm', 'causal-lm-with-past', onnx_config_cls=MarianOnnxConfig),
        'm2m-100': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls=M2M100OnnxConfig),
        'roberta': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=RobertaOnnxConfig),
        't5': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls=T5OnnxConfig),
        'xlm-roberta': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=XLMRobertaOnnxConfig),
        'gpt2': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'sequence-classification', 'token-classification', onnx_config_cls=GPT2OnnxConfig),
        'gpt-j': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'question-answering', 'sequence-classification', onnx_config_cls=GPTJOnnxConfig),
        'gpt-neo': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'sequence-classification', onnx_config_cls=GPTNeoOnnxConfig),
        'layoutlm': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'token-classification', onnx_config_cls=LayoutLMOnnxConfig),
        'electra': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls=ElectraOnnxConfig),
        'vit': supported_features_mapping('default', 'image-classification', onnx_config_cls=ViTOnnxConfig),
        'blenderbot': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls=BlenderbotOnnxConfig),
        'blenderbot-small': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls=BlenderbotSmallOnnxConfig),
    }
    # Union of every feature name supported by at least one model type.
    AVAILABLE_FEATURES = sorted(reduce((lambda s1, s2: (s1 | s2)), (v.keys() for v in _SUPPORTED_MODEL_TYPE.values())))

    @staticmethod
    def get_supported_features_for_model_type(model_type: str, model_name: Optional[str] = None) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
        """Return the feature -> OnnxConfig-constructor map for *model_type*.

        Raises KeyError when the model type is unsupported; *model_name*
        is only used to enrich the error message.
        """
        model_type = model_type.lower()
        if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE:
            model_type_and_model_name = (f'{model_type} ({model_name})' if model_name else model_type)
            raise KeyError(f'{model_type_and_model_name} is not supported yet. Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. If you want to support {model_type} please propose a PR or open up an issue.')
        return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type]

    @staticmethod
    def feature_to_task(feature: str) -> str:
        """Map a feature name to its base task (drops the '-with-past' suffix)."""
        return feature.replace('-with-past', '')

    @staticmethod
    def _validate_framework_choice(framework: str):
        """Raise unless *framework* is 'pt'/'tf' and that framework is installed."""
        if framework not in ['pt', 'tf']:
            raise ValueError(f'Only two frameworks are supported for ONNX export: pt or tf, but {framework} was provided.')
        elif (framework == 'pt') and (not is_torch_available()):
            raise RuntimeError('Cannot export model to ONNX using PyTorch because no PyTorch package was found.')
        elif (framework == 'tf') and (not is_tf_available()):
            raise RuntimeError('Cannot export model to ONNX using TensorFlow because no TensorFlow package was found.')

    @staticmethod
    def get_model_class_for_feature(feature: str, framework: str = 'pt') -> Type:
        """Return the AutoModel class handling *feature* in *framework*."""
        task = FeaturesManager.feature_to_task(feature)
        FeaturesManager._validate_framework_choice(framework)
        if framework == 'pt':
            task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS
        else:
            task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS
        if task not in task_to_automodel:
            raise KeyError(f'Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}')
        return task_to_automodel[task]

    @staticmethod
    def get_model_from_feature(feature: str, model: str, framework: str = 'pt', cache_dir: str = None) -> Union[PreTrainedModel, TFPreTrainedModel]:
        """Load *model* from the hub with the class appropriate for *feature*,
        falling back to cross-framework conversion when needed."""
        model_class = FeaturesManager.get_model_class_for_feature(feature, framework)
        try:
            model = model_class.from_pretrained(model, cache_dir=cache_dir)
        except OSError:
            # Checkpoint exists only in the other framework; convert on load.
            if framework == 'pt':
                model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir)
            else:
                model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir)
        return model

    @staticmethod
    def check_supported_model_or_raise(model: Union[PreTrainedModel, TFPreTrainedModel], feature: str = 'default') -> Tuple[str, Callable]:
        """Validate that *model* supports *feature*; return (model_type,
        OnnxConfig constructor) on success."""
        model_type = model.config.model_type.replace('_', '-')
        model_name = getattr(model, 'name', '')
        model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name)
        if feature not in model_features:
            raise ValueError(f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}")
        return (model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature])
def test_call_after_hooks_in_correct_order(hookregistry, mocker):
    """Registered 'after.all' hooks run ordered by their ``order`` keyword.

    NOTE(review): the decorators below were stripped to bare ``(order=N)``
    residues (a syntax error); reconstructed as ``@after.all(order=N)``
    from the registry call signature — confirm against upstream.
    """
    data = []

    @after.all(order=2)
    def second_hook(features):
        data.append(2)

    @after.all(order=1)
    def first_hook(step):
        data.append(1)

    hookregistry.call('after', 'all', False, mocker.MagicMock())
    assert data == [2, 1]
class TestAssert_reprcompare_attrsclass():
    """Assertion-diff rendering for attrs classes.

    Restored the stripped ``@attr.s`` decorators: every nested data class
    is constructed positionally and compared by value, which requires
    attrs-generated __init__/__eq__ (the ``(auto_detect=True)`` and
    ``(slots=False)`` residues were bare syntax errors).
    """

    def test_attrs(self) -> None:
        @attr.s
        class SimpleDataObject():
            field_a = attr.ib()
            field_b = attr.ib()

        left = SimpleDataObject(1, 'b')
        right = SimpleDataObject(1, 'c')
        lines = callequal(left, right)
        assert lines is not None
        assert lines[2].startswith('Omitting 1 identical item')
        assert 'Matching attributes' not in lines
        for line in lines[2:]:
            assert 'field_a' not in line

    def test_attrs_recursive(self) -> None:
        @attr.s
        class OtherDataObject():
            field_c = attr.ib()
            field_d = attr.ib()

        @attr.s
        class SimpleDataObject():
            field_a = attr.ib()
            field_b = attr.ib()

        left = SimpleDataObject(OtherDataObject(1, 'a'), 'b')
        right = SimpleDataObject(OtherDataObject(1, 'b'), 'b')
        lines = callequal(left, right)
        assert lines is not None
        assert 'Matching attributes' not in lines
        for line in lines[1:]:
            assert 'field_b:' not in line
            assert 'field_c:' not in line

    def test_attrs_recursive_verbose(self) -> None:
        @attr.s
        class OtherDataObject():
            field_c = attr.ib()
            field_d = attr.ib()

        @attr.s
        class SimpleDataObject():
            field_a = attr.ib()
            field_b = attr.ib()

        left = SimpleDataObject(OtherDataObject(1, 'a'), 'b')
        right = SimpleDataObject(OtherDataObject(1, 'b'), 'b')
        lines = callequal(left, right)
        assert lines is not None
        # The nested differing attribute is reported with its value diff.
        assert " field_d: 'a' != 'b'" in lines

    def test_attrs_verbose(self) -> None:
        @attr.s
        class SimpleDataObject():
            field_a = attr.ib()
            field_b = attr.ib()

        left = SimpleDataObject(1, 'b')
        right = SimpleDataObject(1, 'c')
        lines = callequal(left, right, verbose=2)
        assert lines is not None
        assert lines[2].startswith('Matching attributes:')
        assert 'Omitting' not in lines[2]
        assert lines[3] == "['field_a']"

    def test_attrs_with_attribute_comparison_off(self) -> None:
        @attr.s
        class SimpleDataObject():
            field_a = attr.ib()
            field_b = attr.ib(eq=False)  # excluded from comparison

        left = SimpleDataObject(1, 'b')
        right = SimpleDataObject(1, 'b')
        lines = callequal(left, right, verbose=2)
        assert lines is not None
        assert lines[2].startswith('Matching attributes:')
        assert 'Omitting' not in lines[1]
        assert lines[3] == "['field_a']"
        for line in lines[3:]:
            assert 'field_b' not in line

    def test_comparing_two_different_attrs_classes(self) -> None:
        @attr.s
        class SimpleDataObjectOne():
            field_a = attr.ib()
            field_b = attr.ib()

        @attr.s
        class SimpleDataObjectTwo():
            field_a = attr.ib()
            field_b = attr.ib()

        left = SimpleDataObjectOne(1, 'b')
        right = SimpleDataObjectTwo(1, 'c')
        lines = callequal(left, right)
        # Different classes: no attrs-specific diff is produced.
        assert lines is None

    def test_attrs_with_auto_detect_and_custom_eq(self) -> None:
        @attr.s(auto_detect=True)
        class SimpleDataObject():
            field_a = attr.ib()

            def __eq__(self, other):
                return super().__eq__(other)

        left = SimpleDataObject(1)
        right = SimpleDataObject(2)
        lines = callequal(left, right, verbose=2)
        # Custom __eq__ opts the class out of the attrs diff.
        assert lines is None

    def test_attrs_with_custom_eq(self) -> None:
        @attr.s(slots=False)
        class SimpleDataObject():
            field_a = attr.ib()

            def __eq__(self, other):
                return super().__eq__(other)

        left = SimpleDataObject(1)
        right = SimpleDataObject(2)
        lines = callequal(left, right, verbose=2)
        assert lines is None
class Cipher(typing.Generic[Mode]):
    """Pairs a symmetric cipher algorithm with a mode of operation and
    produces encryption/decryption contexts backed by OpenSSL.

    Restored @typing.overload on the four ``...`` stub signatures: without
    it each bare stub would silently shadow the previous definition.
    """

    def __init__(self, algorithm: CipherAlgorithm, mode: Mode, backend: typing.Any = None) -> None:
        if not isinstance(algorithm, CipherAlgorithm):
            raise TypeError('Expected interface of CipherAlgorithm.')
        if mode is not None:
            # Narrows the generic Mode parameter for the type checker.
            assert isinstance(mode, modes.Mode)
            mode.validate_for_algorithm(algorithm)
        self.algorithm = algorithm
        self.mode = mode

    @typing.overload
    def encryptor(self: Cipher[modes.ModeWithAuthenticationTag]) -> AEADEncryptionContext:
        ...

    @typing.overload
    def encryptor(self: _CIPHER_TYPE) -> CipherContext:
        ...

    def encryptor(self):
        """Return an encryption context; AEAD modes must not carry a tag."""
        if isinstance(self.mode, modes.ModeWithAuthenticationTag):
            if self.mode.tag is not None:
                raise ValueError('Authentication tag must be None when encrypting.')
        from cryptography.hazmat.backends.openssl.backend import backend
        ctx = backend.create_symmetric_encryption_ctx(self.algorithm, self.mode)
        return self._wrap_ctx(ctx, encrypt=True)

    @typing.overload
    def decryptor(self: Cipher[modes.ModeWithAuthenticationTag]) -> AEADDecryptionContext:
        ...

    @typing.overload
    def decryptor(self: _CIPHER_TYPE) -> CipherContext:
        ...

    def decryptor(self):
        """Return a decryption context for this algorithm/mode pair."""
        from cryptography.hazmat.backends.openssl.backend import backend
        ctx = backend.create_symmetric_decryption_ctx(self.algorithm, self.mode)
        return self._wrap_ctx(ctx, encrypt=False)

    def _wrap_ctx(self, ctx: _BackendCipherContext, encrypt: bool) -> ((AEADEncryptionContext | AEADDecryptionContext) | CipherContext):
        # AEAD modes get tag-aware wrappers; everything else a plain context.
        if isinstance(self.mode, modes.ModeWithAuthenticationTag):
            if encrypt:
                return _AEADEncryptionContext(ctx)
            else:
                return _AEADDecryptionContext(ctx)
        else:
            return _CipherContext(ctx)
def test_filewritejson_empty_path_raises():
    """A None path in fileWriteJson input raises KeyInContextHasNoValueError."""
    context = Context({'fileWriteJson': {'path': None}})
    with pytest.raises(KeyInContextHasNoValueError) as err_info:
        filewrite.run_step(context)
    expected = ("context['fileWriteJson']['path'] must have a value for "
                "pypyr.steps.filewritejson.")
    assert str(err_info.value) == expected
# NOTE(review): the dispatch decorators below were stripped to residues
# ('_lazy(...)', bare '(cudf.DataFrame)' expressions); reconstructed from
# the register_lazy/register dispatcher pattern — confirm the dispatcher
# object's name against upstream.
@proxify_device_object.register_lazy('cudf')
def _register_cudf():
    """Lazily register cudf container types with the proxify dispatcher,
    only when cudf is first imported."""
    import cudf

    @proxify_device_object.register(cudf.DataFrame)
    @proxify_device_object.register(cudf.Series)
    @proxify_device_object.register(cudf.BaseIndex)
    def proxify_device_object_cudf_dataframe(obj, proxied_id_to_proxy, found_proxies, excl_proxies):
        return proxify(obj, proxied_id_to_proxy, found_proxies)

    try:
        # Teach dask's percentile dispatch to unwrap ProxyObject via the
        # cudf implementation; skip silently when dask_cudf is absent.
        from dask.array.dispatch import percentile_lookup
        from dask_cudf.backends import percentile_cudf
        percentile_lookup.register(ProxyObject, percentile_cudf)
    except ImportError:
        pass
class EdgeBlock(nn.Module):
    """CSP 'edge' residual block: 3x3 conv -> optional attention -> 1x1 conv,
    joined to the input through a stochastic-depth residual connection."""

    def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, drop_block=None, drop_path=0.0):
        super(EdgeBlock, self).__init__()
        mid_chs = int(round(out_chs * bottle_ratio))
        conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
        self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, drop_layer=drop_block, **conv_kwargs)
        if attn_layer is not None:
            self.attn = attn_layer(mid_chs, act_layer=act_layer)
        else:
            self.attn = nn.Identity()
        self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=1, **conv_kwargs)
        self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()

    def zero_init_last(self):
        """Zero the final norm's scale so the block starts as identity."""
        nn.init.zeros_(self.conv2.bn.weight)

    def forward(self, x):
        residual = x
        out = self.conv2(self.attn(self.conv1(x)))
        return self.drop_path(out) + residual
def test_compile_single_qubit_gates():
    """Ten random single-qubit gates compile into exactly two layers:
    a PhasedXPow gate followed by a ZPow gate, unitarily equivalent."""
    qubit = cirq.LineQubit(0)
    original = cirq.Circuit()
    for _ in range(10):
        gate = random.choice([cirq.X, cirq.Y, cirq.Z])
        original.append(gate(qubit) ** random.random())
    compiled = compile_single_qubit_gates(original)
    assert compiled != original
    assert len(compiled) == 2
    assert isinstance(compiled[0].operations[0].gate, cirq.PhasedXPowGate)
    assert isinstance(compiled[1].operations[0].gate, cirq.ZPowGate)
    cirq.testing.assert_allclose_up_to_global_phase(
        original.unitary(dtype=np.complex128),
        compiled.unitary(dtype=np.complex128),
        atol=1e-08,
    )
class AtspiMeta(BaseMeta):
    """Metaclass that registers which wrapper class handles each AT-SPI
    control type, and resolves elements back to their wrapper."""

    # Maps control-type identifier -> registered wrapper class.
    control_type_to_cls = {}

    def __init__(cls, name, bases, attrs):
        BaseMeta.__init__(cls, name, bases, attrs)
        # Every wrapper class advertises the control types it handles via
        # `_control_types`; register the class for each of them.
        for t in cls._control_types:
            AtspiMeta.control_type_to_cls[t] = cls

    def find_wrapper(element):
        # NOTE(review): takes `element`, not `cls`/`self` -- presumably a
        # @staticmethod whose decorator was stripped during extraction;
        # confirm against the original source.
        try:
            wrapper_match = AtspiMeta.control_type_to_cls[element.control_type]
        except KeyError:
            # Unknown control types fall back to the generic wrapper.
            wrapper_match = AtspiWrapper
        return wrapper_match
class PornhubCom(SimpleDownloader):
    """pyLoad downloader plugin for pornhub.com video pages."""
    __name__ = 'PornhubCom'
    __type__ = 'downloader'
    __version__ = '0.62'
    __status__ = 'testing'
    # NOTE(review): the URL regex literal was lost during extraction -- this
    # line is truncated mid-string; restore __pattern__ from the original.
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Pornhub.com downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('jeix', ''), ('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    # Title is pulled from the JSON embedded in the page.
    NAME_PATTERN = '"video_title":"(?P<N>.+?)"'
    TEMP_OFFLINE_PATTERN = '^unmatchable$'
    OFFLINE_PATTERN = '^unmatchable$'

    def get_info(self, url='', html=''):
        """Extend the base info dict by appending the .mp4 extension."""
        info = super(PornhubCom, self).get_info(url, html)
        if ('name' in info):
            info['name'] += '.mp4'
        return info

    def setup(self):
        # Downloads are resumable and may run in parallel.
        self.resume_download = True
        self.multi_dl = True

    def handle_free(self, pyfile):
        """Extract the best-quality direct URL by evaluating the player's
        setup JavaScript and picking the highest numeric quality."""
        m = re.search('<div class="video-wrapper">.+?<script type="text/javascript">(.+?)</script>', self.data, re.S)
        if (m is None):
            self.error(self._('Player Javascript data not found'))
        script = m.group(1)
        m = re.search('qualityItems_\\d+', script)
        if (m is None):
            self.error(self._('`qualityItems` variable no found'))
        # NOTE(review): this second search is redundant -- `m.group(0)`
        # already holds the same match.
        result_var = re.search('qualityItems_\\d+', script).group(0)
        # Keep only the `var ...` declarations, strip comments/whitespace,
        # and append a statement returning the quality list as JSON.
        script = ''.join(re.findall('^\\s*var .+', script, re.M))
        script = re.sub('[\\n\\t]|/\\*.+?\\*/', '', script)
        script += 'JSON.stringify({});'.format(result_var)
        # SECURITY: eval_js executes JavaScript taken from a remote page.
        res = eval_js(script)
        json_data = json.loads(res)
        # Map numeric quality (e.g. 720) -> URL, skipping entries without one.
        urls = {int(re.search('^(\\d+)', x['text']).group(0)): x['url'] for x in json_data if x['url']}
        quality = max(urls.keys())
        self.link = urls[quality]
class FacesDisplay():
    """Builds the stacked source/swapped faces preview image for the GUI.

    NOTE(review): `total_columns` is read without being called (e.g.
    `range(self.total_columns)` below), so it is presumably decorated with
    @property in the original source -- the decorator appears to have been
    stripped during extraction.
    """

    def __init__(self, size, padding, tk_vars):
        logger.trace('Initializing %s: (size: %s, padding: %s, tk_vars: %s)', self.__class__.__name__, size, padding, tk_vars)
        self.size = size                # side length of each face thumbnail, px
        self.display_dims = (1, 1)      # canvas size; updated externally before scaling
        self.tk_vars = tk_vars
        self.padding = padding
        self.update_source = False      # set externally when source frames changed
        self.source = list()            # per-frame dicts with 'image', 'detected_faces', 'filename'
        self.destination = list()       # swapped frames aligned with self.source
        self.faces = dict()             # cropped faces plus per-face matrices/filenames
        self.faces_source = None        # assembled source row (header + faces)
        self.faces_dest = None          # assembled swapped row
        self.tk_image = None            # PhotoImage handed to the canvas
        logger.trace('Initialized %s', self.__class__.__name__)

    def total_columns(self):
        # One column per source frame.
        return len(self.source)

    def update_tk_image(self):
        """Rebuild the preview, scale it to the display and store a PhotoImage."""
        logger.trace('Updating tk image')
        self.build_faces_image()
        img = np.vstack((self.faces_source, self.faces_dest))
        size = self.get_scale_size(img)
        # OpenCV images are BGR; PIL expects RGB.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(img)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        # Image.LANCZOS) -- fine only while Pillow is pinned below that.
        img = img.resize(size, Image.ANTIALIAS)
        self.tk_image = ImageTk.PhotoImage(img)
        self.tk_vars['refresh'].set(False)
        logger.trace('Updated tk image')

    def get_scale_size(self, image):
        """Return (width, height) that fits `image` in display_dims keeping
        aspect ratio; each side clamped to at least 1 px."""
        frameratio = (float(self.display_dims[0]) / float(self.display_dims[1]))
        imgratio = (float(image.shape[1]) / float(image.shape[0]))
        if (frameratio <= imgratio):
            # Width-bound: fill the display width.
            scale = (self.display_dims[0] / float(image.shape[1]))
            size = (self.display_dims[0], max(1, int((image.shape[0] * scale))))
        else:
            # Height-bound: fill the display height.
            scale = (self.display_dims[1] / float(image.shape[0]))
            size = (max(1, int((image.shape[1] * scale))), self.display_dims[1])
        logger.trace('scale: %s, size: %s', scale, size)
        return size

    def build_faces_image(self):
        """Assemble the source row (with filename header) and the swapped row."""
        logger.trace('Building Faces Image')
        # Capture the flag before faces_from_frames() clears it.
        update_all = self.update_source
        self.faces_from_frames()
        if update_all:
            header = self.header_text()
            source = np.hstack([self.draw_rect(face) for face in self.faces['src']])
            self.faces_source = np.vstack((header, source))
        self.faces_dest = np.hstack([self.draw_rect(face) for face in self.faces['dst']])
        logger.debug('source row shape: %s, swapped row shape: %s', self.faces_dest.shape, self.faces_source.shape)

    def faces_from_frames(self):
        """Crop aligned faces out of the source (when flagged) and destination frames."""
        logger.debug('Extracting faces from frames: Number images: %s', len(self.source))
        if self.update_source:
            self.crop_source_faces()
        self.crop_destination_faces()
        logger.debug('Extracted faces from frames: %s', {k: len(v) for (k, v) in self.faces.items()})

    def crop_source_faces(self):
        """Re-crop the first detected face of every source frame, storing the
        alignment matrix and base filename alongside; clears update_source."""
        logger.debug('Updating source faces')
        self.faces = dict()
        for image in self.source:
            # Only the first detected face per frame is previewed.
            detected_face = image['detected_faces'][0]
            src_img = image['image']
            detected_face.load_aligned(src_img, self.size, align_eyes=False)
            matrix = detected_face.aligned['matrix']
            self.faces.setdefault('filenames', list()).append(os.path.splitext(image['filename'])[0])
            self.faces.setdefault('matrix', list()).append(matrix)
            self.faces.setdefault('src', list()).append(AlignerExtract().transform(src_img, matrix, self.size, self.padding))
        self.update_source = False
        logger.debug('Updated source faces')

    def crop_destination_faces(self):
        """Crop swapped faces using the matrices saved from the source pass;
        falls back to blank (ones) frames when no destination exists yet."""
        logger.debug('Updating destination faces')
        self.faces['dst'] = list()
        destination = (self.destination if self.destination else [np.ones_like(src['image']) for src in self.source])
        for (idx, image) in enumerate(destination):
            self.faces['dst'].append(AlignerExtract().transform(image, self.faces['matrix'][idx], self.size, self.padding))
        logger.debug('Updated destination faces')

    def header_text(self):
        """Render one filename, centered, above each face column."""
        # Scale font and header height with the thumbnail size.
        font_scale = (self.size / 640)
        height = (self.size // 8)
        font = cv2.FONT_HERSHEY_SIMPLEX
        text_sizes = [cv2.getTextSize(self.faces['filenames'][idx], font, font_scale, 1)[0] for idx in range(self.total_columns)]
        # Vertical center (single shared y), per-column horizontal centers.
        text_y = int(((height + text_sizes[0][1]) / 2))
        text_x = [(int(((self.size - text_sizes[idx][0]) / 2)) + (self.size * idx)) for idx in range(self.total_columns)]
        logger.debug('filenames: %s, text_sizes: %s, text_x: %s, text_y: %s', self.faces['filenames'], text_sizes, text_x, text_y)
        header_box = (np.ones((height, (self.size * self.total_columns), 3), np.uint8) * 255)
        for (idx, text) in enumerate(self.faces['filenames']):
            cv2.putText(header_box, text, (text_x[idx], text_y), font, font_scale, (0, 0, 0), 1, lineType=cv2.LINE_AA)
        logger.debug('header_box.shape: %s', header_box.shape)
        return header_box

    def draw_rect(self, image):
        """Outline the face tile in white and return it clamped to uint8."""
        cv2.rectangle(image, (0, 0), ((self.size - 1), (self.size - 1)), (255, 255, 255), 1)
        image = np.clip(image, 0.0, 255.0)
        return image.astype('uint8')
def test_exclude_glob(pytester: Pytester) -> None:
    """--ignore-glob with a character class skips every matching path; the
    one non-matching test file still collects and passes."""
    broken_source = 'x y syntaxerror'
    fixtures = [
        ('hello', 'test_hello.py'),
        ('hello2', 'test_hello2.py'),
        ('hallo3', 'test_hello3.py'),
        ('sub', 'test_hello4.py'),
    ]
    for dirname, filename in fixtures:
        directory = pytester.mkdir(dirname)
        directory.joinpath(filename).write_text(broken_source, encoding='utf-8')
    pytester.makepyfile(test_ok='def test_pass(): pass')
    result = pytester.runpytest('--ignore-glob=*h[ea]llo*')
    assert result.ret == 0
    result.stdout.fnmatch_lines(['*1 passed*'])
(host=st.one_of(st.ip_addresses(), st.text()), port=st.one_of(st.none(), st.integers()), raises=st.booleans())
# NOTE(review): the line above is presumably a stripped @given(...)
# hypothesis decorator -- restore the `@given` prefix from the original.
def test_setup_url_for_address(host: str, port: (int | None), raises: bool) -> None:
    """setup_url_for_address yields a URL ending in :<port>/setup.xml when a
    port is known, under both successful and failing DNS resolution."""
    kwargs = {}
    if raises:
        # Simulate a DNS lookup failure.
        kwargs['side_effect'] = gaierror
    else:
        kwargs['return_value'] = '127.0.0.1'
    with mock.patch.object(discovery, 'gethostbyname', **kwargs), mock.patch.object(discovery, 'probe_wemo', return_value=port):
        url = discovery.setup_url_for_address(host=host, port=port)
    if port:
        assert url.endswith(f':{port}/setup.xml')
def statistics_match(df1, df2):
    """Return True when df1 and df2 agree, per-column, on the tracked
    driving statistics.

    Discrete columns are compared by relative value counts, continuous
    columns by mean; each comparison uses its own epsilon. Every column is
    checked (and mismatches printed) before the combined verdict is returned.
    """
    bounds = {'red_light': {'type': 'discrete', 'eps': 0.005}, 'hazard_stop': {'type': 'discrete', 'eps': 0.005}, 'speed_sign': {'type': 'discrete', 'eps': 0.005}, 'center_distance': {'type': 'cont', 'eps': 0.02}, 'relative_angle': {'type': 'cont', 'eps': 0.01}, 'veh_distance': {'type': 'cont', 'eps': 0.6}}

    def _rel_counts(values):
        # Relative frequency of each distinct value.
        _, counts = np.unique(values, return_counts=True)
        return counts / sum(counts)

    def _mean(values):
        return np.array([np.mean(values)])

    def _column_matches(key):
        summarize = _rel_counts if bounds[key]['type'] == 'discrete' else _mean
        delta = abs(summarize(df1[key]) - summarize(df2[key]))
        if all(delta < bounds[key]['eps']):
            return True
        print(f'too different in {key}')
        return False

    # List comprehension (not a generator) so every column is evaluated and
    # every mismatch is reported, exactly like the accumulate-then-all loop.
    return all([_column_matches(key) for key in bounds])
class ResNet18(chainer.Chain):
    """Narrow ResNet-18-style feature extractor: a stem convolution, four
    pairs of residual blocks (width 32->64->128->256, downsampled by the
    'B' blocks), and a final global average pool."""

    def __init__(self):
        super(ResNet18, self).__init__(
            conv1_relu=ConvolutionBlock(1, 32),
            res2a_relu=ResidualBlock(32, 32),
            res2b_relu=ResidualBlock(32, 32),
            res3a_relu=ResidualBlockB(32, 64),
            res3b_relu=ResidualBlock(64, 64),
            res4a_relu=ResidualBlockB(64, 128),
            res4b_relu=ResidualBlock(128, 128),
            res5a_relu=ResidualBlockB(128, 256),
            res5b_relu=ResidualBlock(256, 256),
        )

    def __call__(self, TEST, x):
        h = self.conv1_relu(TEST, x)
        h = chainer.functions.max_pooling_2d(h, (1, 9), (1, 4), (0, 4))
        # Run the residual stages in declaration order.
        for link_name in ('res2a_relu', 'res2b_relu', 'res3a_relu', 'res3b_relu',
                          'res4a_relu', 'res4b_relu', 'res5a_relu', 'res5b_relu'):
            h = getattr(self, link_name)(TEST, h)
        return chainer.functions.average_pooling_2d(h, h.data.shape[2:])
.parametrize('qurl, expected', [(QUrl('ftp://example.com/'), ('ftp', 'example.com', 21)), (QUrl('ftp://example.com:2121/'), ('ftp', 'example.com', 2121)), (QUrl(' (' 'qutebrowser.org', 8010)), (QUrl(' (' 'example.com', 443)), (QUrl(' (' 'example.com', 4343)), (QUrl(' (' 'qutebrowser.org', 80))])
# NOTE(review): the parametrize line above is garbled -- the leading
# `@pytest.mark` prefix and several http(s) URL literals were stripped
# during extraction; restore them from the original test module.
def test_host_tuple_valid(qurl, expected):
    """host_tuple() splits a URL into (scheme, host, port), applying the
    scheme's default port when none is given."""
    assert (urlutils.host_tuple(qurl) == expected)
def get_argument_parser():
    """Build the CLI parser for the lint/format helper.

    Every option is a boolean flag defaulting to False. Uses the idiomatic
    ``action='store_true'`` (equivalent to the previous
    ``store_const, const=True, default=False``) and adds help text for the
    flags that had none.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(description='Run formatting and lint checks.')
    parser.add_argument('-a', '--all', action='store_true', help='Run all the tools')
    parser.add_argument('-b', '--black', action='store_true', help='Run black')
    parser.add_argument('-f', '--flake', action='store_true', help='Run flake8')
    parser.add_argument('-c', '--check', action='store_true', help='Only check (no correction)')
    parser.add_argument('-s', '--sort', action='store_true', help='Run import sorting')
    parser.add_argument('-r', '--repo', action='store_true', help='Running the linter on the whole repository')
    return parser
def degrade(species, kdeg):
    """Generate a degradation rule ``species >> None`` with rate ``kdeg``.

    A bare Monomer is first instantiated and normalized to a complex
    pattern; the rule name is derived from the reactant complex patterns.
    """
    def degrade_name_func(rule_expression):
        # Join the labels of all reactant complex patterns with underscores.
        patterns = rule_expression.reactant_pattern.complex_patterns
        return '_'.join(_complex_pattern_label(cp) for cp in patterns)

    if isinstance(species, Monomer):
        species = species()
    species = as_complex_pattern(species)
    return _macro_rule('degrade', species >> None, [kdeg], ['k'],
                       name_func=degrade_name_func)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
    """Build a vocabulary from text files and prune it to `size` entries.

    Args:
        name: 'source' or 'target' -- selects which special tokens seed the dict.
        filenames: iterable of text files to scan.
        size: maximum vocabulary size after pruning.
        tokenizer: tokenizer passed through to gen_dict_from_file.
        num_workers: parallel workers for file scanning.

    Returns:
        The pruned onmt.Dict.

    Raises:
        ValueError: if `name` is neither 'source' nor 'target'. (The previous
        behavior printed an unhelpful warning and called exit(-1); raising is
        catchable and names the offending value.)
    """
    if (name == 'source'):
        vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token, opt.src_bos_token, opt.src_eos_token], lower=opt.lower)
    elif (name == 'target'):
        vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token, opt.tgt_bos_token, opt.tgt_eos_token], lower=opt.lower)
    else:
        raise ValueError("make_vocab: name must be 'source' or 'target', got %r" % (name,))
    for filename in filenames:
        print(('Generating vocabulary from file %s ... ' % filename))
        onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)
    original_size = vocab.size()
    vocab = vocab.prune(size)
    print(('Created dictionary of size %d (pruned from %d)' % (vocab.size(), original_size)))
    return vocab
class _RopeConfigSource(Source):
    """Preference source backed by a legacy ``config.py`` in the rope folder.

    The config file is executed as a script; its ``set_prefs`` and
    ``project_opened`` hooks (if defined) are then applied to a fresh Prefs.
    """
    name: str = 'config.py'
    run_globals: Dict  # globals namespace the config module executes in

    def __init__(self, ropefolder: Folder):
        self.ropefolder = ropefolder
        self.run_globals = {}

    def _read(self) -> bool:
        """Execute config.py into run_globals; return False when absent."""
        if ((self.ropefolder is None) or (not self.ropefolder.has_child('config.py'))):
            return False
        config = self.ropefolder.get_child('config.py')
        # Make the executed file look like a normal top-level module.
        self.run_globals.update({'__name__': '__main__', '__builtins__': __builtins__, '__file__': config.real_path})
        with open(config.real_path) as f:
            # SECURITY NOTE: this executes arbitrary code from the project's
            # own config.py -- intentional for this legacy config format.
            code = compile(f.read(), config.real_path, 'exec')
            exec(code, self.run_globals)
        return True

    def parse(self) -> Optional[Dict]:
        """Return the preferences as a dict, or None when no config.py exists."""
        prefs = Prefs()
        if (not self._read()):
            return None
        if ('set_prefs' in self.run_globals):
            # The hook mutates the Prefs instance in place.
            self.run_globals['set_prefs'](prefs)
        if ('project_opened' in self.run_globals):
            prefs['project_opened'] = self.run_globals['project_opened']
        return asdict(prefs)
class CLexer(object):
    """Replays a pre-tokenized stream through the yacc-style lexer interface
    (``input()``/``token()``), forwarding preprocessor tokens to the parser
    as callbacks instead of returning them."""

    def __init__(self, cparser):
        self.cparser = cparser
        # Identifiers introduced as type names; lets token() retag an
        # IDENTIFIER as TYPE_NAME for the grammar.
        self.type_names = set()

    def input(self, tokens):
        # Reset to the start of a new token list.
        self.tokens = tokens
        self.pos = 0

    def token(self):
        """Return the next parser-visible token, or None when exhausted.

        PP_DEFINE / PP_DEFINE_CONSTANT / PP_IFNDEF tokens are consumed here
        and dispatched to the parser's handlers rather than returned. A falsy
        token aborts the stream early (break falls through to `return None`).
        """
        while (self.pos < len(self.tokens)):
            t = self.tokens[self.pos]
            self.pos += 1
            if (not t):
                break
            if (t.type == 'PP_DEFINE'):
                (name, value) = t.value
                self.cparser.handle_define(name, value, t.filename, t.lineno)
                continue
            elif (t.type == 'PP_DEFINE_CONSTANT'):
                (name, value) = t.value
                self.cparser.handle_define_constant(name, value, t.filename, t.lineno)
                continue
            elif (t.type == 'PP_IFNDEF'):
                self.cparser.handle_ifndef(t.value, t.filename, t.lineno)
                continue
            # Retag tokens under the names the grammar expects.
            if (t.type == 'LPAREN'):
                t.type = '('
            elif (t.type == 'PP_NUMBER'):
                t.type = 'CONSTANT'
            elif ((t.type == 'IDENTIFIER') and (t.value in keywords)):
                # C keywords become their own uppercase token type.
                t.type = t.value.upper()
            elif ((t.type == 'IDENTIFIER') and (t.value in self.type_names)):
                t.type = 'TYPE_NAME'
            t.lexer = self
            return t
        return None
class SourceLinesAdapter():
    """Precomputes line-start offsets of a source string so that
    line <-> offset queries are O(1) or O(log n).

    ``starts[i]`` is the offset of 1-based line ``i + 1``; a sentinel of
    ``len(code) + 1`` closes the last line.
    """

    def __init__(self, source_code):
        self.code = source_code
        self.starts = None
        self._initialize_line_starts()

    def _initialize_line_starts(self):
        starts = [0]
        # Record the offset just past every newline.
        newline = self.code.find('\n')
        while newline != -1:
            starts.append(newline + 1)
            newline = self.code.find('\n', newline + 1)
        starts.append(len(self.code) + 1)
        self.starts = starts

    def get_line(self, lineno):
        """Return the text of 1-based `lineno` without its trailing newline."""
        begin = self.starts[lineno - 1]
        end = self.starts[lineno] - 1
        return self.code[begin:end]

    def length(self):
        # Number of lines (the trailing sentinel is not a line).
        return len(self.starts) - 1

    def get_line_number(self, offset):
        """Return the 1-based line number containing `offset`."""
        return bisect.bisect(self.starts, offset)

    def get_line_start(self, lineno):
        return self.starts[lineno - 1]

    def get_line_end(self, lineno):
        return self.starts[lineno] - 1
def kasten96_lt(airmass_absolute, precipitable_water, aod_bb):
    """Calculate Linke turbidity using Kasten's pyrheliometric formula.

    Args:
        airmass_absolute: pressure-adjusted airmass (scalar or array).
        precipitable_water: precipitable water (cm, scalar or array).
        aod_bb: broadband aerosol optical depth (scalar or array).

    Returns:
        Linke turbidity factor, same shape as the (broadcast) inputs.

    The published form is ``-(9.4 + 0.9*am) * ln(tau) / am`` with
    ``tau = exp(-am * (delta_cda + delta_w + delta_a))``. The previous code
    evaluated ``log(exp(...))`` literally, which underflows ``exp`` to 0 for
    large ``am * delta`` and returns ``-inf``; simplifying analytically,
    ``-(9.4 + 0.9*am) * ln(exp(-am*delta)) / am == (9.4 + 0.9*am) * delta``,
    which is exact and numerically safe.
    """
    delta_cda = (-0.101) + (0.235 * (airmass_absolute ** (-0.16)))
    delta_w = (0.112 * (airmass_absolute ** (-0.55))) * (precipitable_water ** 0.34)
    delta_a = aod_bb
    lt = (9.4 + (0.9 * airmass_absolute)) * ((delta_cda + delta_w) + delta_a)
    return lt
class Net(nn.Module):
    """3-D CNN: three conv-conv-pool stages followed by a 3-layer MLP head.

    forward() returns ``(flattened_features, logits)`` where logits span
    4 classes. NOTE(review): fc11 expects exactly 16000 flat features,
    which fixes the expected input volume size -- confirm against callers.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Stage 1: 1 -> 32 -> 32 channels.
        self.conv1 = nn.Conv3d(1, 32, 3, padding=1)
        self.conv2 = nn.Conv3d(32, 32, 3, padding=1)
        # Stage 2: 32 -> 64 -> 64 channels.
        self.conv3 = nn.Conv3d(32, 64, 3, padding=1)
        self.conv4 = nn.Conv3d(64, 64, 3, padding=1)
        # Stage 3: 64 -> 128 -> 128 channels.
        self.conv5 = nn.Conv3d(64, 128, 3, padding=1)
        self.conv6 = nn.Conv3d(128, 128, 3, padding=1)
        # Classifier head with heavy dropout.
        self.fc11 = nn.Linear(16000, 1024)
        self.fc12 = nn.Linear(1024, 1024)
        self.fc13 = nn.Linear(1024, 4)
        self.d = nn.Dropout(0.7)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool3d(F.relu(self.conv2(out)), kernel_size=2, stride=2)
        out = F.relu(self.conv3(out))
        out = F.max_pool3d(F.relu(self.conv4(out)), kernel_size=2, stride=2)
        out = F.relu(self.conv5(out))
        out = F.max_pool3d(F.relu(self.conv6(out)), kernel_size=2, stride=2)
        x_feat = out.view(-1, self.num_flat_features(out))
        hidden = self.d(F.relu(self.fc11(x_feat)))
        hidden = self.d(F.relu(self.fc12(hidden)))
        return (x_feat, self.fc13(hidden))

    def num_flat_features(self, x):
        # Product of all non-batch dimensions.
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
.network
# NOTE(review): the line above is presumably a stripped
# @pytest.mark.network marker -- restore the `@pytest.mark` prefix.
def test_get_transform_grid_list__source_id():
    """Filtering by source_id returns only grids from that source.

    The bbox (170, -90, -170, 90) deliberately crosses the antimeridian.
    """
    grids = get_transform_grid_list(bbox=BBox(170, (- 90), (- 170), 90), source_id='us_noaa', include_already_downloaded=True)
    assert (len(grids) > 5)
    source_ids = set()
    for grid in grids:
        source_ids.add(grid['properties']['source_id'])
    assert (sorted(source_ids) == ['us_noaa'])
def to_cpu(list_of_tensor):
    """Recursively move a (possibly nested) list of tensors to the CPU.

    Accepts a flat list of tensors or a list of lists (nested to any depth
    built from lists) and returns a new structure of the same shape with
    every tensor moved via ``Tensor.cpu()``.

    The previous implementation indexed ``list_of_tensor[0]`` unguarded and
    raised IndexError on an empty list; an empty list now returns ``[]``.
    """
    if list_of_tensor and isinstance(list_of_tensor[0], list):
        return [to_cpu(sub_list) for sub_list in list_of_tensor]
    return [tensor.cpu() for tensor in list_of_tensor]
def wsproto_demo(host: str, port: int) -> None:
    """Open a WebSocket to host:port, exchange a text message and a ping,
    then perform a clean close handshake."""
    print(f'Connecting to {host}:{port}')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))

    def send_and_pump(event):
        # Serialize the event onto the wire, then read replies into `ws`.
        net_send(ws.send(event), sock)
        net_recv(ws, sock)

    print('Opening WebSocket')
    ws = WSConnection(ConnectionType.CLIENT)
    send_and_pump(Request(host=host, target='server'))
    handle_events(ws)

    message = 'wsproto is great'
    print(f'Sending message: {message}')
    send_and_pump(Message(data=message))
    handle_events(ws)

    payload = b'table tennis'
    print(f'Sending ping: {payload!r}')
    send_and_pump(Ping(payload=payload))
    handle_events(ws)

    print('Closing WebSocket')
    send_and_pump(CloseConnection(code=1000, reason='sample reason'))
    # Half-close our side, then drain the server's close reply.
    sock.shutdown(socket.SHUT_WR)
    net_recv(ws, sock)
def test_hotstart():
    """Saving a hotstart file mid-run and restarting from it restores state.

    Phase 1 runs the model, saves an .hsf at step 10 and records J1's depth.
    Phase 2 starts a fresh simulation from that .hsf and checks J1's depth
    just before the first step matches the recorded value.
    """
    HSF_PATH = MODEL_WEIR_SETTING_PATH.replace('.inp', '.hsf')
    # Start from a clean slate so we test the freshly written file.
    if os.path.exists(HSF_PATH):
        os.remove(HSF_PATH)
    assert (not os.path.exists(HSF_PATH))
    with Simulation(MODEL_WEIR_SETTING_PATH) as sim:
        J1 = Nodes(sim)['J1']
        for (ind, step) in enumerate(sim):
            if (ind == 10):
                sim.save_hotstart(HSF_PATH)
                J1_dep = J1.depth
                break
    assert os.path.exists(HSF_PATH)
    with Simulation(MODEL_WEIR_SETTING_PATH) as sim:
        def store_J1_depth_before_step():
            # Fires before each step; captures the depth seeded by the hotstart.
            sim.J1_depth = Nodes(sim)['J1'].depth
        sim.add_before_step(store_J1_depth_before_step)
        sim.use_hotstart(HSF_PATH)
        for (ind, step) in enumerate(sim):
            # Only the first step is needed.
            break
    assert (sim.J1_depth == pytest.approx(J1_dep, 1e-05))
class ChartElement(Element):
    """Report element that renders a Chart to an inline base64 PNG.

    On rendering failure, generate_json returns the formatted error text and
    generate_html returns a fixed failure placeholder; both log the error.
    """

    def __init__(self, chart: Chart, figsize: Tuple[(float, float)]=None, dpi=250, optimise=False, grid_proportion=GridProportion.Eight, comment: str='', html_figsize: Tuple[(float, float)]=None, float_setting: str=None, **savefig_settings):
        """Store rendering settings; a random PNG filename is generated so
        multiple charts in one document never collide."""
        super().__init__(grid_proportion)
        self._chart = chart
        self._filename = '{}.png'.format(uuid.uuid4())
        self.figsize = figsize
        self.html_figsize = html_figsize
        self.dpi = dpi
        self.optimise = optimise
        self.grid_proportion = grid_proportion
        self.comment = comment
        self.savefig_settings = savefig_settings
        self.float_setting = float_setting
        self.logger = qf_logger.getChild(self.__class__.__name__)

    def get_grid_proportion_css_class(self) -> str:
        return str(self.grid_proportion)

    def generate_json(self) -> str:
        """Render the chart as a base64 data URI; on failure return the
        formatted error message instead."""
        try:
            result = ('data:image/png;base64,' + self._chart.render_as_base64_image(self.figsize, self.dpi, self.optimise))
        except Exception as ex:
            result = self._log_rendering_error(ex)
        self._chart.close()
        return result

    def generate_html(self, document: Optional[Document]=None) -> str:
        """Render the chart into the 'chart.html' template; on failure emit
        a failure placeholder. The optional comment is always appended."""
        try:
            base64 = self._chart.render_as_base64_image(self.figsize, self.dpi, self.optimise, **self.savefig_settings)
            env = templates.environment
            template = env.get_template('chart.html')
            if self.html_figsize:
                (width, height) = self.html_figsize
                result = template.render(data=base64, width=f'{width}px', height=f'{height}px', float=self.float_setting)
            else:
                result = template.render(data=base64, width='100%', float=self.float_setting)
        except Exception as ex:
            self._log_rendering_error(ex)
            # BUG FIX: the closing tag was '</h1>' for an '<h2>' element.
            result = "<h2 class='chart-render-failure'>Failed to render chart</h2>"
        if (self._chart is not None):
            self._chart.close()
        result += self._create_html_comment()
        return result

    def _log_rendering_error(self, ex) -> str:
        """Log a chart rendering failure and return the formatted message."""
        error_message = '{}\n{}'.format(ex.__class__.__name__, traceback.format_exc())
        self.logger.exception('Chart generation error:')
        self.logger.exception(error_message)
        return error_message

    def _create_html_comment(self):
        template = Template('\n <p class="comment">{{ comment }}</p>\n ')
        return template.render(comment=self.comment)
_stabilize
_specialize
_rewriter([log])
# NOTE(review): the three lines above are presumably stripped decorator
# lines (e.g. @register_stabilize / @register_specialize /
# @node_rewriter([log])) -- confirm against the original source.
def local_log1p(fgraph, node):
    """Graph rewrite: log(1 + x) -> log1p(x) and log(1 - x) -> log1p(-x).

    Returns a one-element replacement list when the pattern matches;
    returns None implicitly otherwise so the rewriter skips the node.
    """
    if (node.op == log):
        (log_arg,) = node.inputs
        if (log_arg.owner and (log_arg.owner.op == add)):
            # Split the addition into constant scalars and the remainder.
            (scalars, scalar_inputs, nonconsts) = scalarconsts_rest(log_arg.owner.inputs, only_process_constants=True)
            # Only rewrite when the constant part sums to exactly 1.
            if (scalars and np.allclose(np.sum(scalars), 1)):
                if nonconsts:
                    if (len(nonconsts) > 1):
                        ninp = add(*nonconsts)
                    else:
                        ninp = nonconsts[0]
                    if (ninp.dtype != log_arg.type.dtype):
                        ninp = ninp.astype(node.outputs[0].dtype)
                    # alloc_like preserves the original output's broadcast shape.
                    return [alloc_like(log1p(ninp), node.outputs[0], fgraph)]
        elif (log_arg.owner and (log_arg.owner.op == sub)):
            # log(1 - x): the first operand must be the constant 1.
            one = extract_constant(log_arg.owner.inputs[0], only_process_constants=True)
            if (one != 1):
                return
            other = log_arg.owner.inputs[1]
            if (other.dtype != log_arg.dtype):
                other = other.astype(log_arg.dtype)
            return [log1p(neg(other))]
class CleoNamespaceNotFoundError(CleoUserError):
    """Raised when the requested command namespace does not exist; the
    message may include did-you-mean suggestions."""

    def __init__(self, name: str, namespaces: (list[str] | None)=None) -> None:
        parts = [f'There are no commands in the "{name}" namespace.']
        if namespaces:
            suggestions = _suggest_similar_names(name, namespaces)
            if suggestions:
                parts.append(suggestions)
        super().__init__('\n\n'.join(parts))
def test_index(stream):
    """Arithmetic on the streaming DataFrame's index is applied per frame."""
    frame = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
    sdf = DataFrame(example=frame, stream=stream)
    shifted = sdf.index + 5
    results = shifted.stream.gather().sink_to_list()
    for _ in range(2):
        sdf.emit(frame)
    wait_for(lambda: len(results) > 1, timeout=2, period=0.05)
    expected = frame.index + 5
    assert_eq(results[0], expected)
    assert_eq(results[1], expected)
def channel_deposit_with_the_same_token_network(deposit_queue: List[ChannelDeposit]) -> None:
    """Drain the deposit queue, topping up each channel to its minimum capacity.

    Deposits whose channel does not exist yet are retried on the next pass of
    the outer loop; the loop only ends once every queued deposit has been
    handled. NOTE: if a channel never appears this loop spins forever.
    """
    while deposit_queue:
        to_delete = []
        for (pos, channel_deposit) in enumerate(deposit_queue):
            channel = channel_details(channel_deposit.endpoint, channel_deposit.token_address, channel_deposit.partner)
            if (channel is None):
                # Channel not opened yet -- keep it queued and retry later.
                continue
            deposit = necessary_deposit(channel, channel_deposit.minimum_capacity)
            to_delete.append(pos)
            if deposit:
                current_total_deposit = int(channel['total_deposit'])
                new_total_deposit = (current_total_deposit + deposit)
                deposit_json = {'total_deposit': new_total_deposit}
                log.info(f'Depositing to channel {channel_deposit}')
                url_channel = f'{channel_deposit.endpoint}/api/{API_VERSION}/channels/{channel_deposit.token_address}/{channel_deposit.partner}'
                response = requests.patch(url_channel, json=deposit_json)
                # BUG FIX: the original `assert (response, response.text)`
                # asserted a two-element tuple, which is always truthy.
                # Assert the Response itself (truthy only for OK statuses)
                # so a failed PATCH surfaces with the response body.
                assert response, response.text
            else:
                log.info(f'Channel exists and has enough capacity {channel_deposit}')
        # Remove handled entries back-to-front so indices stay valid.
        for pos in reversed(to_delete):
            deposit_queue.pop(pos)
.parametrize('shape', [(), (3, 4)])
.parametrize('dtype', [None, torch.float, torch.double, torch.int])
.parametrize('device', ([None] + get_available_devices()))
.parametrize('from_path', [True, False])
# NOTE(review): the `.parametrize(...)` lines above and before several
# methods below are presumably stripped `@pytest.mark.parametrize`
# decorators -- restore the `@pytest.mark` prefixes from the original.
class TestConstructors():
    """Tests for the MemoryMappedTensor constructors.

    Shared pattern: each test optionally backs the tensor with a file
    (`from_path`), expects a RuntimeError for any non-CPU device, accepts
    the shape as expanded args / positional / keyword (`shape_arg`), and
    checks shape, dtype, filename and fill value.
    """

    .parametrize('shape_arg', ['expand', 'arg', 'kwarg'])
    def test_zeros(self, shape, dtype, device, tmp_path, from_path, shape_arg):
        """zeros(): all-zero tensor with the requested shape/dtype/backing file."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        if ((device is not None) and (device.type != 'cpu')):
            # Memory-mapped tensors are CPU-only.
            with pytest.raises(RuntimeError):
                MemoryMappedTensor.zeros(shape, dtype=dtype, device=device, filename=filename)
            return
        if (shape_arg == 'expand'):
            # Expanding an empty shape tuple passes no size args -> TypeError.
            with (pytest.raises(TypeError) if (shape == ()) else nullcontext()):
                t = MemoryMappedTensor.zeros(*shape, dtype=dtype, device=device, filename=filename)
            if (shape == ()):
                return
        elif (shape_arg == 'arg'):
            t = MemoryMappedTensor.zeros(shape, dtype=dtype, device=device, filename=filename)
        elif (shape_arg == 'kwarg'):
            t = MemoryMappedTensor.zeros(shape=shape, dtype=dtype, device=device, filename=filename)
        assert (t.shape == shape)
        if (dtype is not None):
            assert (t.dtype is dtype)
        if (filename is not None):
            assert (t.filename == filename)
        assert (t == 0).all()

    .parametrize('shape_arg', ['expand', 'arg', 'kwarg'])
    def test_ones(self, shape, dtype, device, tmp_path, from_path, shape_arg):
        """ones(): all-one tensor; same contract as test_zeros."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        if ((device is not None) and (device.type != 'cpu')):
            with pytest.raises(RuntimeError):
                MemoryMappedTensor.ones(shape, dtype=dtype, device=device, filename=filename)
            return
        if (shape_arg == 'expand'):
            with (pytest.raises(TypeError) if (shape == ()) else nullcontext()):
                t = MemoryMappedTensor.ones(*shape, dtype=dtype, device=device, filename=filename)
            if (shape == ()):
                return
        elif (shape_arg == 'arg'):
            t = MemoryMappedTensor.ones(shape, dtype=dtype, device=device, filename=filename)
        elif (shape_arg == 'kwarg'):
            t = MemoryMappedTensor.ones(shape=shape, dtype=dtype, device=device, filename=filename)
        assert (t.shape == shape)
        if (dtype is not None):
            assert (t.dtype is dtype)
        if (filename is not None):
            assert (t.filename == filename)
        assert (t == 1).all()

    .parametrize('shape_arg', ['expand', 'arg', 'kwarg'])
    def test_empty(self, shape, dtype, device, tmp_path, from_path, shape_arg):
        """empty(): uninitialized contents, so only metadata is checked."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        if ((device is not None) and (device.type != 'cpu')):
            with pytest.raises(RuntimeError):
                MemoryMappedTensor.empty(shape, dtype=dtype, device=device, filename=filename)
            return
        if (shape_arg == 'expand'):
            with (pytest.raises(TypeError) if (shape == ()) else nullcontext()):
                t = MemoryMappedTensor.empty(*shape, dtype=dtype, device=device, filename=filename)
            if (shape == ()):
                return
        elif (shape_arg == 'arg'):
            t = MemoryMappedTensor.empty(shape, dtype=dtype, device=device, filename=filename)
        elif (shape_arg == 'kwarg'):
            t = MemoryMappedTensor.empty(shape=shape, dtype=dtype, device=device, filename=filename)
        assert (t.shape == shape)
        if (dtype is not None):
            assert (t.dtype is dtype)
        if (filename is not None):
            assert (t.filename == filename)

    .parametrize('shape_arg', ['expand', 'arg', 'kwarg'])
    def test_full(self, shape, dtype, device, tmp_path, from_path, shape_arg):
        """full(): tensor filled with fill_value=2."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        if ((device is not None) and (device.type != 'cpu')):
            with pytest.raises(RuntimeError):
                MemoryMappedTensor.full(shape, fill_value=2, dtype=dtype, device=device, filename=filename)
            return
        if (shape_arg == 'expand'):
            with (pytest.raises(TypeError) if (shape == ()) else nullcontext()):
                t = MemoryMappedTensor.full(*shape, fill_value=2, dtype=dtype, device=device, filename=filename)
            if (shape == ()):
                return
        elif (shape_arg == 'arg'):
            t = MemoryMappedTensor.full(shape, fill_value=2, dtype=dtype, device=device, filename=filename)
        elif (shape_arg == 'kwarg'):
            t = MemoryMappedTensor.full(shape=shape, fill_value=2, dtype=dtype, device=device, filename=filename)
        assert (t.shape == shape)
        if (dtype is not None):
            assert (t.dtype is dtype)
        if (filename is not None):
            assert (t.filename == filename)
        assert (t == 2).all()

    def test_zeros_like(self, shape, dtype, device, tmp_path, from_path):
        """zeros_like(): copies shape/dtype from a reference tensor, zero-filled."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        tensor = (- torch.ones(shape, dtype=dtype, device=device))
        t = MemoryMappedTensor.zeros_like(tensor, filename=filename)
        assert (t.shape == shape)
        if (dtype is not None):
            assert (t.dtype is dtype)
        if (filename is not None):
            assert (t.filename == filename)
        assert (t == 0).all()

    def test_ones_like(self, shape, dtype, device, tmp_path, from_path):
        """ones_like(): copies shape/dtype from a reference tensor, one-filled."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        tensor = (- torch.ones(shape, dtype=dtype, device=device))
        t = MemoryMappedTensor.ones_like(tensor, filename=filename)
        assert (t.shape == shape)
        if (dtype is not None):
            assert (t.dtype is dtype)
        if (filename is not None):
            assert (t.filename == filename)
        assert (t == 1).all()

    def test_full_like(self, shape, dtype, device, tmp_path, from_path):
        """full_like(): copies shape/dtype from a reference tensor, filled with 2."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        tensor = (- torch.ones(shape, dtype=dtype, device=device))
        t = MemoryMappedTensor.full_like(tensor, 2, filename=filename)
        assert (t.shape == shape)
        if (dtype is not None):
            assert (t.dtype is dtype)
        if (filename is not None):
            assert (t.filename == filename)
        assert (t == 2).all()

    def test_from_filename(self, shape, dtype, device, tmp_path, from_path):
        """Round trip: reopening via from_filename/from_handler matches the source."""
        if from_path:
            filename = (tmp_path / 'file.memmap')
        else:
            filename = None
        if (dtype is None):
            # from_filename needs a concrete dtype to reinterpret the bytes.
            dtype = torch.float32
        tensor = (- torch.randint(10, shape, dtype=dtype, device=device))
        t = MemoryMappedTensor.full_like(tensor, 2, filename=filename)
        if (filename is not None):
            t2 = MemoryMappedTensor.from_filename(filename, dtype=dtype, shape=shape)
        else:
            # No file: reopen through the anonymous-memory handler instead.
            t2 = MemoryMappedTensor.from_handler(t._handler, dtype=dtype, shape=shape, index=None)
        torch.testing.assert_close(t, t2)
def evaluate(ground_truth_path, result_path, subset, top_k, ignore):
    """Compute and print top-k accuracy of `result_path` against ground truth.

    When `ignore` is true, videos with no predictions are excluded from the
    denominator; otherwise they count as errors.
    """
    ground_truth, class_labels_map = load_ground_truth(ground_truth_path, subset)
    result = load_result(result_path, top_k, class_labels_map)
    n_ground_truth = len(ground_truth)
    ground_truth = remove_nonexistent_ground_truth(ground_truth, result)
    if ignore:
        # Re-count after dropping videos without predictions.
        n_ground_truth = len(ground_truth)
    print('calculate top-{} accuracy'.format(top_k))
    n_correct = sum(1 for line in ground_truth if line[1] in result[line[0]])
    accuracy = n_correct / n_ground_truth
    print('correct sample: {}; all sample: {}'.format(n_correct, n_ground_truth))
    print('top-{} accuracy: {:.2f}'.format(top_k, accuracy * 100))
    print('{:.2f}'.format(accuracy * 100))
    return accuracy
class ErrorCode(enum.Enum):
    """Identifier for every distinct check this analyzer can report.

    Values are stable numeric identifiers: do not renumber or reuse them.
    Gaps in the sequence (17-20, 26-27, 29, 33) presumably belonged to
    checks that were removed -- leave them unassigned.
    """
    bad_star_import = 1
    cant_import = 2
    unexpected_node = 3
    undefined_name = 4
    undefined_attribute = 5
    attribute_is_never_set = 6
    duplicate_dict_key = 7
    unhashable_key = 8
    bad_unpack = 9
    unsupported_operation = 10
    not_callable = 11
    incompatible_call = 12
    method_first_arg = 13
    bad_super_call = 14
    impure_async_call = 15
    unnecessary_yield = 16
    class_variable_redefinition = 21
    bad_global = 22
    condition_always_true = 23
    inference_failure = 24
    bad_format_string = 25
    yield_without_value = 28
    invalid_method_return_type = 30
    missing_asynq = 31
    bad_exception = 32
    bad_async_yield = 34
    add_import = 35
    duplicate_yield = 36
    yield_in_comprehension = 37
    use_floor_div = 38
    task_needs_yield = 39
    mixing_bytes_and_text = 40
    bad_except_handler = 41
    implicit_non_ascii_string = 42
    missing_await = 43
    unused_variable = 44
    bad_nonlocal = 45
    non_boolean_in_boolean_context = 46
    use_fstrings = 47
    import_failed = 48
    unused_ignore = 49
    possibly_undefined_name = 50
    missing_f = 51
    incompatible_return_value = 52
    incompatible_argument = 53
    incompatible_default = 54
    internal_error = 55
    bad_yield_from = 56
    incompatible_assignment = 57
    invalid_typeddict_key = 58
    invalid_annotation = 59
    bare_ignore = 60
    duplicate_enum_member = 61
    missing_return_annotation = 62
    missing_parameter_annotation = 63
    type_always_true = 64
    value_always_true = 65
    type_does_not_support_bool = 66
    missing_return = 67
    no_return_may_return = 68
    implicit_reexport = 69
    invalid_context_manager = 70
    suggested_return_type = 71
    suggested_parameter_type = 72
    incompatible_override = 73
    impossible_pattern = 74
    bad_match = 75
    bad_evaluator = 76
    implicit_any = 77
    already_declared = 78
    invalid_annotated_assignment = 79
    unused_assignment = 80
    incompatible_yield = 81
    invalid_import = 82
    too_many_positional_args = 83
    deprecated = 84
    invalid_override_decorator = 85
    override_does_not_override = 86
    reveal_type = 87
    missing_generic_parameters = 88
    disallowed_import = 89
def test_scenarios_none_found(pytester, pytest_params):
    """scenarios('.') with no feature files present must fail collection
    with a NoScenariosFound error."""
    test_path = pytester.makepyfile("\n import pytest\n from pytest_bdd import scenarios\n\n scenarios('.')\n ")
    result = pytester.runpytest_subprocess(test_path, *pytest_params)
    result.assert_outcomes(errors=1)
    result.stdout.fnmatch_lines(['*NoScenariosFound*'])
class PLMSSampler(object):
    """PLMS sampler wrapping a trained DDPM-style diffusion model."""
    def __init__(self, model, schedule='linear', **kwargs):
        # `model` must expose num_timesteps (read here) plus alphas_cumprod /
        # alphas_cumprod_prev / betas / device (read later in make_schedule).
        # Extra kwargs are accepted and ignored.
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
def register_buffer(self, name, attr):
if (type(attr) == torch.Tensor):
if (attr.device != torch.device('cuda')):
attr = attr.to(torch.device('cuda'))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
if (ddim_eta != 0):
raise ValueError('ddim_eta must be 0 for PLMS')
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
(ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
_grad()
def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
if (conditioning is not None):
if isinstance(conditioning, dict):
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
if (cbs != batch_size):
print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
elif (conditioning.shape[0] != batch_size):
print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
(C, H, W) = shape
size = (batch_size, C, H, W)
print(f'Data shape for PLMS sampling is {size}')
(samples, intermediates) = self.plms_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
return (samples, intermediates)
_grad()
def plms_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
device = self.model.betas.device
b = shape[0]
if (x_T is None):
img = torch.randn(shape, device=device)
else:
img = x_T
if (timesteps is None):
timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
elif ((timesteps is not None) and (not ddim_use_original_steps)):
subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = (list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps))
total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
print(f'Running PLMS Sampling with {total_steps} timesteps')
iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
old_eps = []
for (i, step) in enumerate(iterator):
index = ((total_steps - i) - 1)
ts = torch.full((b,), step, device=device, dtype=torch.long)
ts_next = torch.full((b,), time_range[min((i + 1), (len(time_range) - 1))], device=device, dtype=torch.long)
if (mask is not None):
assert (x0 is not None)
img_orig = self.model.q_sample(x0, ts)
img = ((img_orig * mask) + ((1.0 - mask) * img))
outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, old_eps=old_eps, t_next=ts_next)
(img, pred_x0, e_t) = outs
old_eps.append(e_t)
if (len(old_eps) >= 4):
old_eps.pop(0)
if callback:
callback(i)
if img_callback:
img_callback(pred_x0, i)
if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return (img, intermediates)
_grad()
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, old_eps=None, t_next=None):
(b, *_, device) = (*x.shape, x.device)
def get_model_output(x, t):
if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat(([x] * 2))
t_in = torch.cat(([t] * 2))
c_in = torch.cat([unconditional_conditioning, c])
(e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
if (score_corrector is not None):
assert (self.model.parameterization == 'eps')
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
return e_t
alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
def get_x_prev_and_pred_x0(e_t, index):
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
if quantize_denoised:
(pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
if (noise_dropout > 0.0):
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
return (x_prev, pred_x0)
e_t = get_model_output(x, t)
if (len(old_eps) == 0):
(x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t, index)
e_t_next = get_model_output(x_prev, t_next)
e_t_prime = ((e_t + e_t_next) / 2)
elif (len(old_eps) == 1):
e_t_prime = (((3 * e_t) - old_eps[(- 1)]) / 2)
elif (len(old_eps) == 2):
e_t_prime = ((((23 * e_t) - (16 * old_eps[(- 1)])) + (5 * old_eps[(- 2)])) / 12)
elif (len(old_eps) >= 3):
e_t_prime = (((((55 * e_t) - (59 * old_eps[(- 1)])) + (37 * old_eps[(- 2)])) - (9 * old_eps[(- 3)])) / 24)
(x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t_prime, index)
return (x_prev, pred_x0, e_t) |
class StateActionDynEVAE(nn.Module):
    """VAE over stacked image states and action trajectories with twin decoders.

    States go through a strided conv pyramid, actions through a small SELU MLP;
    each is reparameterized into a Gaussian latent, and two parallel Decoder
    heads produce outputs concatenated along the channel dimension.
    """

    def __init__(self, traj_size, action_embed_size, state_embed_size, stack=4):
        super().__init__()
        self.traj_size = traj_size
        self.action_embed_size = action_embed_size
        self.state_embed_size = state_embed_size
        self.stack = stack
        self.hidden_size = 400
        hidden = self.hidden_size
        # Action trunk: input projection plus one hidden layer.
        self.action_encoder_layers = nn.ModuleList([
            nn.Linear(traj_size, hidden),
            nn.Linear(hidden, hidden),
        ])
        self.lin_action_mu = nn.Linear(hidden, self.action_embed_size)
        self.lin_action_sigma = nn.Linear(hidden, self.action_embed_size)
        # State trunk: stride-2 conv pyramid 32 -> 64 -> 128 -> 256 -> hidden.
        in_ch = 3 * self.stack
        self.state_encoder = nn.Sequential(
            nn.BatchNorm2d(in_ch),
            nn.Conv2d(in_ch, 32, 4, 2, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 4, 2, 1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 256, 4, 2, 1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, hidden, 4, 1, 0),
            nn.ReLU(inplace=True),
        )
        self.lin_state_mu = nn.Linear(hidden, self.state_embed_size)
        self.lin_state_sigma = nn.Linear(hidden, self.state_embed_size)
        self.decoder1 = Decoder(self.state_embed_size, self.action_embed_size, hidden)
        self.decoder2 = Decoder(self.state_embed_size, self.action_embed_size, hidden)

    def encode_state(self, x):
        """Return (mu, log_sigma^2) of the state latent for a batch of states."""
        feats = self.state_encoder(x).reshape(x.size(0), -1)
        return self.lin_state_mu(feats), self.lin_state_sigma(feats)

    def encode_actions(self, x):
        """Return (mu, log_sigma^2) of the action latent for a batch of trajectories."""
        h = x.view(-1, self.traj_size)
        for fc in self.action_encoder_layers:
            h = F.selu(fc(h))
        return self.lin_action_mu(h), self.lin_action_sigma(h)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparameterization trick."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def sample(self, n=1):
        """Decode ``n`` latents drawn from the standard-normal prior (CUDA-only)."""
        with torch.no_grad():
            z_state = torch.Tensor(n, self.state_embed_size).normal_().cuda()
            z_action = torch.Tensor(n, self.action_embed_size).normal_().cuda()
            return self.decode(z_state, z_action)

    def forward(self, s, a):
        """Encode both modalities, reparameterize, decode.

        Returns (decoded, state_mu, state_logvar, action_mu, action_logvar).
        """
        state_mu, state_logvar = self.encode_state(s)
        state_z = self.reparameterize(state_mu, state_logvar)
        action_mu, action_logvar = self.encode_actions(a)
        action_z = self.reparameterize(action_mu, action_logvar)
        return self.decode(state_z, action_z), state_mu, state_logvar, action_mu, action_logvar

    def decode(self, state_z, action_z):
        """Run both decoder heads and concatenate along the channel dim."""
        return torch.cat([self.decoder1(state_z, action_z), self.decoder2(state_z, action_z)], dim=1)
def create_train_state(model: FlaxAutoModelForSequenceClassification, learning_rate_fn: Callable[[int], float], is_regression: bool, num_labels: int, weight_decay: float) -> train_state.TrainState:
    """Build an AdamW-backed TrainState whose loss/logits functions fit the task.

    Regression uses MSE over the first logit; classification uses softmax
    cross-entropy over ``num_labels`` classes. Biases and LayerNorm scales are
    excluded from weight decay.
    """

    class TrainState(train_state.TrainState):
        # Task-specific callables stored as static (non-pytree) fields.
        logits_fn: Callable = struct.field(pytree_node=False)
        loss_fn: Callable = struct.field(pytree_node=False)

    def decay_mask_fn(params):
        # True = apply weight decay; skip biases and LayerNorm scale params.
        flat = traverse_util.flatten_dict(params)
        mask = {
            path: path[-1] != 'bias' and path[-2:] != ('LayerNorm', 'scale')
            for path in flat
        }
        return traverse_util.unflatten_dict(mask)

    tx = optax.adamw(learning_rate=learning_rate_fn, b1=0.9, b2=0.999, eps=1e-06, weight_decay=weight_decay, mask=decay_mask_fn)

    if is_regression:
        def mse_loss(logits, labels):
            return jnp.mean((logits[..., 0] - labels) ** 2)

        logits_fn = lambda logits: logits[..., 0]
        loss_fn = mse_loss
    else:
        def cross_entropy_loss(logits, labels):
            xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels))
            return jnp.mean(xentropy)

        logits_fn = lambda logits: logits.argmax(-1)
        loss_fn = cross_entropy_loss

    return TrainState.create(apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=logits_fn, loss_fn=loss_fn)
def test_get_module_raises():
    """A nonexistent module name must raise PyModuleNotFoundError carrying the
    full user-guidance message."""
    bogus_name = 'unlikelyblahmodulenameherexxssz'
    expected = "unlikelyblahmodulenameherexxssz.py should be in your pipeline dir, or in your working dir, or it should be installed in the current python env.\nIf you have 'package.sub.mod' your pipeline dir should contain ./package/sub/mod.py\nIf you specified 'mymodulename', your pipeline dir should contain ./mymodulename.py\nIf the module is not in your pipeline dir nor in the current working dir, it must exist in your current python env - so you should have run pip install or setup.py"
    with pytest.raises(PyModuleNotFoundError) as exc_info:
        moduleloader.get_module(bogus_name)
    assert str(exc_info.value) == expected
def construct_odenet(dims):
    """Build a time-dependent MLP for a neural ODE.

    Alternates ConcatLinear and TimeDependentSwish for each consecutive pair
    in ``dims``, dropping the activation after the final linear layer.
    """
    modules = []
    for src, dst in zip(dims, dims[1:]):
        modules.append(diffeq_layers.ConcatLinear(src, dst))
        modules.append(basic_layers.TimeDependentSwish(dst))
    # No activation after the last linear layer.
    return container_layers.SequentialDiffEq(*modules[:-1])
class LxDeviceListClass(gdb.Command):
    # NOTE: no class docstring on purpose — gdb uses it as the command's help
    # text, and the original command registers without one.

    def __init__(self):
        super(LxDeviceListClass, self).__init__('lx-device-list-class', gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # With an argument: list devices of that single class.
        # Without: walk every class and list its devices, indented one level.
        if arg:
            target = get_class_by_name(arg)
            for dev in class_for_each_device(target):
                _show_device(dev)
        else:
            for cls in for_each_class():
                gdb.write('class {}:\t{}\n'.format(cls['name'].string(), cls))
                for dev in class_for_each_device(cls):
                    _show_device(dev, level=1)
class Signature(Generic[T]):
    """Normalized function signature used to compare stub vs. runtime.

    ``T`` is either ``nodes.Argument`` (mypy stub side) or
    ``inspect.Parameter`` (runtime side).

    Fix: the three factory functions are alternate constructors of this class;
    their ``@staticmethod`` decorators were stripped and are restored here.
    """

    def __init__(self) -> None:
        self.pos: list[T] = []  # positional (incl. pos-or-keyword) args, in order
        self.kwonly: dict[str, T] = {}  # keyword-only args, by name
        self.varpos: T | None = None  # *args, if any
        self.varkw: T | None = None  # **kwargs, if any

    def __str__(self) -> str:
        """Render as ``def (a, b = ..., *, c: int = ...)`` for error output."""

        def get_name(arg: Any) -> str:
            if isinstance(arg, inspect.Parameter):
                return arg.name
            if isinstance(arg, nodes.Argument):
                return arg.variable.name
            raise AssertionError

        def get_type(arg: Any) -> str | None:
            # Runtime parameters carry no stub type to show.
            if isinstance(arg, inspect.Parameter):
                return None
            if isinstance(arg, nodes.Argument):
                return str(arg.variable.type or arg.type_annotation)
            raise AssertionError

        def has_default(arg: Any) -> bool:
            if isinstance(arg, inspect.Parameter):
                return bool(arg.default != inspect.Parameter.empty)
            if isinstance(arg, nodes.Argument):
                return arg.kind.is_optional()
            raise AssertionError

        def get_desc(arg: Any) -> str:
            arg_type = get_type(arg)
            return (get_name(arg) + (f': {arg_type}' if arg_type else '') + (' = ...' if has_default(arg) else ''))

        # Required kw-only args sort before optional ones, then by name.
        kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
        ret = 'def ('
        ret += ', '.join(
            [get_desc(arg) for arg in self.pos]
            + (['*' + get_name(self.varpos)] if self.varpos else (['*'] if self.kwonly else []))
            + [get_desc(arg) for arg in kw_only]
            + (['**' + get_name(self.varkw)] if self.varkw else [])
        )
        ret += ')'
        return ret

    @staticmethod
    def from_funcitem(stub: nodes.FuncItem) -> Signature[nodes.Argument]:
        """Build a Signature from a mypy stub function definition."""
        stub_sig: Signature[nodes.Argument] = Signature()
        stub_args = maybe_strip_cls(stub.name, stub.arguments)
        for stub_arg in stub_args:
            if stub_arg.kind.is_positional():
                stub_sig.pos.append(stub_arg)
            elif stub_arg.kind.is_named():
                stub_sig.kwonly[stub_arg.variable.name] = stub_arg
            elif stub_arg.kind == nodes.ARG_STAR:
                stub_sig.varpos = stub_arg
            elif stub_arg.kind == nodes.ARG_STAR2:
                stub_sig.varkw = stub_arg
            else:
                raise AssertionError
        return stub_sig

    @staticmethod
    def from_inspect_signature(signature: inspect.Signature) -> Signature[inspect.Parameter]:
        """Build a Signature from a runtime ``inspect.Signature``."""
        runtime_sig: Signature[inspect.Parameter] = Signature()
        for runtime_arg in signature.parameters.values():
            if runtime_arg.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD):
                runtime_sig.pos.append(runtime_arg)
            elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
                runtime_sig.kwonly[runtime_arg.name] = runtime_arg
            elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
                runtime_sig.varpos = runtime_arg
            elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
                runtime_sig.varkw = runtime_arg
            else:
                raise AssertionError
        return runtime_sig

    @staticmethod
    def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> Signature[nodes.Argument]:
        """Merge all overload items into a single combined Signature.

        Arguments present in only some overloads become optional; their types
        are unioned across overloads.
        """
        assume_positional_only = is_dunder(stub.name, exclude_special=True)
        all_args: dict[str, list[tuple[nodes.Argument, int]]] = {}
        for func in map(_resolve_funcitem_from_decorator, stub.items):
            assert func is not None
            args = maybe_strip_cls(stub.name, func.arguments)
            for index, arg in enumerate(args):
                # Positional-only args are keyed by position, not by name.
                name = f'__{index}' if (arg.variable.name.startswith('__') or assume_positional_only) else arg.variable.name
                all_args.setdefault(name, []).append((arg, index))

        def get_position(arg_name: str) -> int:
            return max(index for _, index in all_args[arg_name])

        def get_type(arg_name: str) -> mypy.types.ProperType:
            with mypy.state.state.strict_optional_set(True):
                all_types = [arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]]
                return mypy.typeops.make_simplified_union([t for t in all_types if t])

        def get_kind(arg_name: str) -> nodes.ArgKind:
            kinds = {arg.kind for arg, _ in all_args[arg_name]}
            if nodes.ARG_STAR in kinds:
                return nodes.ARG_STAR
            if nodes.ARG_STAR2 in kinds:
                return nodes.ARG_STAR2
            # An arg missing from some overloads is effectively optional.
            is_opt = (len(all_args[arg_name]) < len(stub.items) or nodes.ARG_OPT in kinds or nodes.ARG_NAMED_OPT in kinds)
            is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
            if is_opt:
                return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
            return nodes.ARG_POS if is_pos else nodes.ARG_NAMED

        sig: Signature[nodes.Argument] = Signature()
        for arg_name in sorted(all_args, key=get_position):
            example_arg_name = all_args[arg_name][0][0].variable.name
            arg = nodes.Argument(nodes.Var(example_arg_name, get_type(arg_name)), type_annotation=None, initializer=None, kind=get_kind(arg_name))
            if arg.kind.is_positional():
                sig.pos.append(arg)
            elif arg.kind.is_named():
                sig.kwonly[arg.variable.name] = arg
            elif arg.kind == nodes.ARG_STAR:
                sig.varpos = arg
            elif arg.kind == nodes.ARG_STAR2:
                sig.varkw = arg
            else:
                raise AssertionError
        return sig
def assert_column_equality(output_df: DataFrame, target_df: DataFrame, output_column: Column, target_column: Column) -> None:
    """Raise AssertionError unless the two columns (and frame shapes) match.

    First checks that the selected columns have equal row counts and that the
    frames have equal widths, then compares collected values row by row.
    """
    rows_match = output_df.select(output_column).count() == target_df.select(target_column).count()
    width_match = len(target_df.columns) == len(output_df.columns)
    if not (rows_match and width_match):
        raise AssertionError(f'''DataFrame shape mismatch:
output_df shape: {len(output_df.columns)} columns and {output_df.count()} rows
target_df shape: {len(target_df.columns)} columns and {target_df.count()} rows.''')
    # NOTE(review): assumes output_column / target_column stringify to valid
    # SQL identifiers for selectExpr — confirm callers pass column names.
    output_data = output_df.selectExpr(f'{output_column} as {target_column}').collect()
    target_data = target_df.select(target_column).collect()
    if output_data != target_data:
        raise AssertionError(f'''Columns have different values:
output_column records: {output_data}
target_column records: {target_data}.''')
import dataclasses


@dataclasses.dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Collate raw audio features into padded batches for wav2vec2 pretraining.

    Pads ``input_values`` via the feature extractor and attaches the
    ``mask_time_indices`` required by the contrastive pretraining objective.

    Fix: the ``@dataclass`` decorator was stripped; without it, ``model`` and
    ``feature_extractor`` are bare annotations (never set on instances) and
    there is no generated ``__init__`` — restored here.
    """

    model: Wav2Vec2ForPreTraining  # used only for config and output-length helper
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[(bool, str)] = 'longest'
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[(str, Union[(List[int], torch.Tensor)])]]) -> Dict[(str, torch.Tensor)]:
        """Pad ``features`` into tensors and compute time-mask indices."""
        batch = self.feature_extractor.pad(features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
        # Transformer-level sequence length after the conv feature extractor.
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1])
        batch_size = batch['input_values'].shape[0]
        # NOTE(review): if the extractor returns no 'attention_mask' key this
        # raises KeyError, and the later _compute_mask_indices call would see
        # an undefined attention_mask — preserved from the original; confirm
        # the extractor always emits an attention mask in this pipeline.
        if batch['attention_mask'] is not None:
            # Downsample the sample-level mask to feature-frame resolution:
            # mark the last valid frame, then back-fill via a reversed cumsum.
            output_lengths = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1)).to(torch.long)
            attention_mask = torch.zeros((batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['input_values'].device)
            attention_mask[(torch.arange(attention_mask.shape[0], device=batch['input_values'].device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        batch['mask_time_indices'] = _compute_mask_indices((batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, device=batch['input_values'].device, attention_mask=attention_mask, min_masks=2)
        return batch
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.