def _log_post(post_details: PostDetails, result: PostResultModel, insert_time, cache_time):
total = ((insert_time + cache_time) + post_details.file_time)
file_time_str = ('file: {}ms, '.format(post_details.file_time) if post_details.file_time else '')
s = '{}db: {}ms, caches: {}ms, total: {}ms'
timings = s.format(file_time_str, insert_time, cache_time, total)
post_type = ('thread' if (result.post_refno == 1) else 'reply')
log = 'new {} /{}/{}#{} ({})'.format(post_type, result.board_name, result.thread_refno, result.post_refno, timings)
mod_log(log, ip4_str=ip4_to_str(post_details.ip4)) |
class SegmentationLineFeaturesProvider():
def __init__(self, document_features_context: DocumentFeaturesContext, use_first_token_of_block: bool):
self.document_features_context = document_features_context
self.use_first_token_of_block = use_first_token_of_block
def iter_line_features(self, layout_document: LayoutDocument) -> Iterable[SegmentationLineFeatures]:
segmentation_line_features = SegmentationLineFeatures(document_features_context=self.document_features_context)
previous_token: Optional[LayoutToken] = None
segmentation_line_features.document_token_count = sum((len(line.tokens) for block in layout_document.iter_all_blocks() for line in block.lines))
        pattern_candidate_block_iterable = (block for page in layout_document.pages for (block_index, block) in enumerate(page.blocks) if ((block_index < 2) or (block_index > (len(page.blocks) - 2))))
        pattern_candidate_line_iterable = (block.lines[0] for block in pattern_candidate_block_iterable if (block.lines and block.lines[0].tokens))
all_pattern_by_line_id = {id(line): get_text_pattern(line.text) for line in pattern_candididate_line_iterable}
LOGGER.debug('all_pattern_by_line_id: %s', all_pattern_by_line_id)
pattern_by_line_id = {key: value for (key, value) in all_pattern_by_line_id.items() if (len(value) >= 8)}
pattern_counter = Counter(pattern_by_line_id.values())
LOGGER.debug('pattern_counter: %s', pattern_counter)
seen_repetitive_patterns: Set[str] = set()
document_token_index = 0
for page in layout_document.pages:
blocks = page.blocks
segmentation_line_features.page_blocks = blocks
for (block_index, block) in enumerate(blocks):
segmentation_line_features.page_block_index = block_index
block_lines = block.lines
segmentation_line_features.block_lines = block_lines
block_line_texts = [line.text for line in block_lines]
max_block_line_text_length = max((len(text) for text in block_line_texts))
first_block_token = next(iter(block.iter_all_tokens()), None)
assert first_block_token
for (line_index, line) in enumerate(block_lines):
segmentation_line_features.document_token_index = document_token_index
document_token_index += len(line.tokens)
segmentation_line_features.layout_line = line
segmentation_line_features.block_line_index = line_index
segmentation_line_features.max_block_line_text_length = max_block_line_text_length
line_text = block_line_texts[line_index]
retokenized_token_texts = re.split(' |\\t|\\f|\\u00A0', line_text)
if (not retokenized_token_texts):
continue
if self.use_first_token_of_block:
token = first_block_token
else:
token = line.tokens[0]
segmentation_line_features.layout_token = token
segmentation_line_features.line_text = line_text
segmentation_line_features.concatenated_line_tokens_text = line_text
segmentation_line_features.token_text = retokenized_token_texts[0].strip()
segmentation_line_features.second_token_text = (retokenized_token_texts[1] if (len(retokenized_token_texts) >= 2) else '')
segmentation_line_features.previous_layout_token = previous_token
line_pattern = pattern_by_line_id.get(id(line), '')
LOGGER.debug('line_pattern: %r', line_pattern)
segmentation_line_features.is_repetitive_pattern = (pattern_counter[line_pattern] > 1)
segmentation_line_features.is_first_repetitive_pattern = (segmentation_line_features.is_repetitive_pattern and (line_pattern not in seen_repetitive_patterns))
if segmentation_line_features.is_first_repetitive_pattern:
seen_repetitive_patterns.add(line_pattern)
(yield segmentation_line_features)
previous_token = token |
def build_sample_db():
db.drop_all()
db.create_all()
first_names = ['Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie', 'Sophie', 'Mia', 'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica', 'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy']
last_names = ['Brown', 'Brown', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas', 'Roberts', 'Khan', 'Clarke', 'Clarke', 'Clarke', 'James', 'Phillips', 'Wilson', 'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander']
countries = [('ZA', 'South Africa', 27, 'ZAR', 'Africa/Johannesburg'), ('BF', 'Burkina Faso', 226, 'XOF', 'Africa/Ouagadougou'), ('US', 'United States of America', 1, 'USD', 'America/New_York'), ('BR', 'Brazil', 55, 'BRL', 'America/Sao_Paulo'), ('TZ', 'Tanzania', 255, 'TZS', 'Africa/Dar_es_Salaam'), ('DE', 'Germany', 49, 'EUR', 'Europe/Berlin'), ('CN', 'China', 86, 'CNY', 'Asia/Shanghai')]
user_list = []
for i in range(len(first_names)):
user = User()
country = random.choice(countries)
user.type = random.choice(AVAILABLE_USER_TYPES)[0]
user.first_name = first_names[i]
user.last_name = last_names[i]
        user.email = (first_names[i].lower() + '@example.com')  # placeholder domain; the original address suffix was stripped from the source
        user.website = 'https://www.example.com'  # placeholder; the original URL was stripped from the source
        user.ip_address = '127.0.0.1'
        user.country = country[1]
        user.currency = country[3]
        user.timezone = country[4]
        user.dialling_code = country[2]
        user.local_phone_number = ('0' + ''.join(random.choices('0123456789', k=9)))  # digit pool assumed; the original character set was stripped from the source
user_list.append(user)
db.session.add(user)
tag_list = []
for tmp in ['YELLOW', 'WHITE', 'BLUE', 'GREEN', 'RED', 'BLACK', 'BROWN', 'PURPLE', 'ORANGE']:
tag = Tag()
tag.name = tmp
tag_list.append(tag)
db.session.add(tag)
sample_text = [{'title': 'de Finibus Bonorum et Malorum - Part I', 'content': 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'}, {'title': 'de Finibus Bonorum et Malorum - Part II', 'content': 'Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?'}, {'title': 'de Finibus Bonorum et Malorum - Part III', 'content': 'At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.'}]
for user in user_list:
entry = random.choice(sample_text)
post = Post()
post.user = user
post.title = "{}'s opinion on {}".format(user.first_name, entry['title'])
post.text = entry['content']
post.background_color = random.choice(['#cccccc', 'red', 'lightblue', '#0f0'])
tmp = int((1000 * random.random()))
post.date = (datetime.datetime.now() - datetime.timedelta(days=tmp))
post.tags = random.sample(tag_list, 2)
db.session.add(post)
trunk = Tree(name='Trunk')
db.session.add(trunk)
for i in range(5):
branch = Tree()
branch.name = ('Branch ' + str((i + 1)))
branch.parent = trunk
db.session.add(branch)
for j in range(5):
leaf = Tree()
leaf.name = ('Leaf ' + str((j + 1)))
leaf.parent = branch
db.session.add(leaf)
db.session.commit()
return |
class ChangeStatus(object):
def __init__(self, all_changed: bool=False) -> None:
if all_changed:
self.source_files = 1
self.make_files = 1
else:
self.source_files = 0
self.make_files = 0
def record_change(self, file_name) -> None:
file_name = file_name.lower()
if file_name_is_cmake_file(file_name):
self.make_files += 1
elif ('/fbcode_builder/cmake' in file_name):
self.source_files += 1
elif ('/fbcode_builder/' not in file_name):
self.source_files += 1
def sources_changed(self) -> bool:
return (self.source_files > 0)
def build_changed(self) -> bool:
return (self.make_files > 0) |
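# Hedged usage sketch for ChangeStatus; file_name_is_cmake_file is assumed (from
# the surrounding module) to match CMakeLists.txt / *.cmake style paths:
#   status = ChangeStatus()
#   status.record_change('oss/CMakeLists.txt')   # counted as a make file
#   status.record_change('folly/Executor.cpp')   # counted as a source file
#   assert status.build_changed() and status.sources_changed()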
def _get_notes(**kwargs) -> str:
if (kwargs['notes_file'] is not None):
if (kwargs['notes'] is None):
with open(kwargs['notes_file'], 'r') as fin:
return fin.read()
else:
click.echo('ERROR: Cannot specify --notes and --notes-file', err=True)
sys.exit(1)
else:
return kwargs['notes'] |
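# Hedged example of the precedence implemented above: with
# kwargs == {'notes': None, 'notes_file': 'CHANGELOG.txt'} the file contents are
# returned; passing both --notes and --notes-file prints an error and exits with
# status 1; with no notes_file the raw --notes value is returned as-is.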
class MsgStub(object):
def __init__(self, channel):
self.CreateVestingAccount = channel.unary_unary('/cosmos.vesting.v1beta1.Msg/CreateVestingAccount', request_serializer=cosmos_dot_vesting_dot_v1beta1_dot_tx__pb2.MsgCreateVestingAccount.SerializeToString, response_deserializer=cosmos_dot_vesting_dot_v1beta1_dot_tx__pb2.MsgCreateVestingAccountResponse.FromString) |
def _dump_groups(groups, op_type, workdir):
fname = f'fuse_group_{op_type}_groups.txt'
file_path = os.path.join(workdir, fname)
with open(file_path, 'w') as f:
for group in groups:
single_group_str = ','.join((op._attrs['name'] for op in group))
            f.write(f'[{single_group_str}]\n')
f.write(graph_utils.sorted_op_pseudo_code(group))
f.write('\n')
_LOGGER.info(f'Dumped groups to {file_path}') |
class OptionSeriesDumbbellLowmarkerStates(Options):
    @property
    def hover(self) -> 'OptionSeriesDumbbellLowmarkerStatesHover':
        return self._config_sub_data('hover', OptionSeriesDumbbellLowmarkerStatesHover)
    @property
    def normal(self) -> 'OptionSeriesDumbbellLowmarkerStatesNormal':
        return self._config_sub_data('normal', OptionSeriesDumbbellLowmarkerStatesNormal)
    @property
    def select(self) -> 'OptionSeriesDumbbellLowmarkerStatesSelect':
        return self._config_sub_data('select', OptionSeriesDumbbellLowmarkerStatesSelect) |
class OptionPlotoptionsFunnelSonificationTracks(Options):
    @property
    def activeWhen(self) -> 'OptionPlotoptionsFunnelSonificationTracksActivewhen':
        return self._config_sub_data('activeWhen', OptionPlotoptionsFunnelSonificationTracksActivewhen)
    @property
    def instrument(self):
        return self._config_get('piano')
    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)
    @property
    def mapping(self) -> 'OptionPlotoptionsFunnelSonificationTracksMapping':
        return self._config_sub_data('mapping', OptionPlotoptionsFunnelSonificationTracksMapping)
    @property
    def midiName(self):
        return self._config_get(None)
    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)
    @property
    def pointGrouping(self) -> 'OptionPlotoptionsFunnelSonificationTracksPointgrouping':
        return self._config_sub_data('pointGrouping', OptionPlotoptionsFunnelSonificationTracksPointgrouping)
    @property
    def roundToMusicalNotes(self):
        return self._config_get(True)
    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def showPlayMarker(self):
        return self._config_get(True)
    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def type(self):
        return self._config_get('instrument')
    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False) |
def _flush_queue():
global scheduled, queue
scheduled = False
if (not queue):
return
_queue = queue[:MAX_BATCH]
queue = queue[MAX_BATCH:]
entries = []
for q in _queue:
params = {'v': 1, 'tid': TRACK_ID, 'cid': get_settings('uid', '000')}
params.update(q)
entries.append(urllib.parse.urlencode(params))
data = '\n'.join(entries).encode('ascii')
req = urllib.request.Request(HOST, data, method='POST', headers={'User-Agent': get_user_agent(), 'Content-Length': len(data)})
try:
with urllib.request.urlopen(req):
pass
    except Exception:
        pass  # analytics are best-effort; ignore network failures
if queue:
schedule_send() |
class OptionSeriesColumnrangeSonificationDefaultinstrumentoptionsMappingGapbetweennotes(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class JsonFloatTests(TestCase):
def test_dumps(self):
with self.assertRaises(ValueError):
json.dumps(float('inf'))
with self.assertRaises(ValueError):
json.dumps(float('nan'))
def test_loads(self):
with self.assertRaises(ValueError):
json.loads('Infinity')
with self.assertRaises(ValueError):
json.loads('NaN') |
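# For comparison, the standard-library json module serializes these values as
# 'Infinity'/'NaN' by default and only raises when allow_nan=False, e.g.
# json.dumps(float('inf'), allow_nan=False) raises ValueError. The json module
# exercised here is therefore assumed to be a stricter drop-in replacement.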
def uploaded_file(files, multiple=False):
if multiple:
files_uploaded = []
for file in files:
            extension = file.filename.rsplit('.', 1)[-1]  # take the extension after the last dot
filename = ((get_file_name() + '.') + extension)
filedir = (current_app.config.get('BASE_DIR') + '/static/uploads/')
if (not os.path.isdir(filedir)):
os.makedirs(filedir)
file_path = (filedir + filename)
file.save(file_path)
files_uploaded.append(UploadedFile(file_path, filename))
else:
        extension = files.filename.rsplit('.', 1)[-1]  # take the extension after the last dot
filename = ((get_file_name() + '.') + extension)
filedir = (current_app.config.get('BASE_DIR') + '/static/uploads/')
if (not os.path.isdir(filedir)):
os.makedirs(filedir)
file_path = (filedir + filename)
files.save(file_path)
files_uploaded = UploadedFile(file_path, filename)
return files_uploaded |
def delete_post_file(post: PostModel):
if (post.file is None):
raise ArgumentError(MESSAGE_POST_HAS_NO_FILE)
with session() as s:
file_orm_model = s.query(FileOrmModel).filter_by(id=post.file.id).one()
s.delete(file_orm_model)
s.commit()
thread = post.thread
_invalidate_thread_cache(s, thread, thread.board)
_invalidate_board_pages_catalog_cache(s, thread.board)
document_cache.purge_thread(thread.board, thread)
document_cache.purge_board(thread.board) |
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'name'
fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']}, 'firewall_ippool': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
for attribute_name in module_spec['options']:
fields['firewall_ippool']['options'][attribute_name] = module_spec['options'][attribute_name]
if (mkeyname and (mkeyname == attribute_name)):
fields['firewall_ippool']['options'][attribute_name]['required'] = True
module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
check_legacy_fortiosapi(module)
is_error = False
has_changed = False
result = None
diff = None
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if ('access_token' in module.params):
connection.set_option('access_token', module.params['access_token'])
if ('enable_log' in module.params):
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, 'firewall_ippool')
(is_error, has_changed, result, diff) = fortios_firewall(module.params, fos, module.check_mode)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortiOS system and your playbook, see more details by specifying option -vvv')
if (not is_error):
if (versions_check_result and (versions_check_result['matched'] is False)):
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
else:
module.exit_json(changed=has_changed, meta=result, diff=diff)
elif (versions_check_result and (versions_check_result['matched'] is False)):
module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg='Error in repo', meta=result) |
def extractRpgNovels(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('Maou-sama no machizukuri!', 'Maou-sama no Machizukuri!', 'translated')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
class JoinedSet(Set):
    @classmethod
    def _from_set(cls, obj, jdata=[], ljdata=[], auto_select_tables=[]):
rv = cls(obj.db, obj.query, obj.query.ignore_common_filters, obj._model_)
rv._stable_ = obj._model_.tablename
rv._jdata_ = list(jdata)
rv._ljdata_ = list(ljdata)
rv._auto_select_tables_ = list(auto_select_tables)
rv._pks_ = obj._model_._instance_()._fieldset_pk
return rv
def _clone(self, ignore_common_filters=None, model=None, **changes):
rv = super()._clone(ignore_common_filters, model, **changes)
rv._stable_ = self._stable_
rv._jdata_ = self._jdata_
rv._ljdata_ = self._ljdata_
rv._auto_select_tables_ = self._auto_select_tables_
rv._pks_ = self._pks_
return rv
def _join_set_builder(self, obj, jdata, auto_select_tables):
return JoinedSet._from_set(obj, jdata=(self._jdata_ + jdata), ljdata=self._ljdata_, auto_select_tables=(self._auto_select_tables_ + auto_select_tables))
def _left_join_set_builder(self, jdata):
return JoinedSet._from_set(self, jdata=self._jdata_, ljdata=(self._ljdata_ + jdata), auto_select_tables=self._auto_select_tables_)
def _iterselect_rows(self, *fields, **options):
tablemap = self.db._adapter.tables(self.query, options.get('join', None), options.get('left', None), options.get('orderby', None), options.get('groupby', None))
(fields, concrete_tables) = self.db._adapter._expand_all_with_concrete_tables(fields, tablemap)
(colnames, sql) = self.db._adapter._select_wcols(self.query, fields, **options)
return JoinIterRows(self.db, sql, fields, concrete_tables, colnames)
def _split_joins(self, joins):
rv = {'belongs': [], 'one': [], 'many': []}
for (jname, jtable, rel_type) in joins:
rv[rel_type].append((jname, jtable))
return (rv['belongs'], rv['one'], rv['many'])
def _build_records_from_joined(self, rowmap, inclusions, colnames):
for (rid, many_data) in inclusions.items():
for (jname, included) in many_data.items():
rowmap[rid][jname]._cached_resultset = Rows(self.db, list(included.values()), [])
return JoinRows(self.db, list(rowmap.values()), colnames, _jdata=(self._jdata_ + self._ljdata_))
def _select_rowpks_extractor(self, row):
if (not set(row.keys()).issuperset(self._pks_)):
return None
if (len(self._pks_) > 1):
return tuple((row[pk] for pk in self._pks_))
return row[tuple(self._pks_)[0]]
def _run_select_(self, *fields, **options):
(belongs_j, one_j, many_j) = self._split_joins(self._jdata_)
(belongs_l, one_l, many_l) = self._split_joins(self._ljdata_)
parsers = (self._build_jparsers(belongs_j, one_j, many_j) + self._build_lparsers(belongs_l, one_l, many_l))
if self._ljdata_:
fields = list(fields)
if (not fields):
fields = [v.ALL for v in self._auto_select_tables_]
for join in options['left']:
fields.append(join.first.ALL)
rows = self._iterselect_rows(*fields, **options)
plainrows = []
rowmap = OrderedDict()
inclusions = defaultdict((lambda : {jname: OrderedDict() for (jname, _) in (many_j + many_l)}))
for row in rows:
if (self._stable_ not in row):
plainrows.append(row)
continue
rid = self._select_rowpks_extractor(row[self._stable_])
if (rid is None):
plainrows.append(row)
continue
rowmap[rid] = rowmap.get(rid, row[self._stable_])
for parser in parsers:
parser(rowmap, inclusions, row, rid)
if ((not rowmap) and plainrows):
return Rows(self.db, plainrows, rows.colnames)
return self._build_records_from_joined(rowmap, inclusions, rows.colnames)
def _build_jparsers(self, belongs, one, many):
rv = []
for (jname, jtable) in belongs:
rv.append(self._jbelong_parser(self.db, jname, jtable))
for (jname, jtable) in one:
rv.append(self._jone_parser(self.db, jname, jtable))
for (jname, jtable) in many:
rv.append(self._jmany_parser(self.db, jname, jtable))
return rv
def _build_lparsers(self, belongs, one, many):
rv = []
for (jname, jtable) in belongs:
rv.append(self._lbelong_parser(self.db, jname, jtable))
for (jname, jtable) in one:
rv.append(self._lone_parser(self.db, jname, jtable))
for (jname, jtable) in many:
rv.append(self._lmany_parser(self.db, jname, jtable))
return rv
    @staticmethod
    def _jbelong_parser(db, fieldname, tablename):
rmodel = db[tablename]._model_
def parser(rowmap, inclusions, row, rid):
rowmap[rid][fieldname] = typed_row_reference_from_record(row[tablename], rmodel)
return parser
    @staticmethod
    def _jone_parser(db, fieldname, tablename):
def parser(rowmap, inclusions, row, rid):
rowmap[rid][fieldname]._cached_resultset = row[tablename]
return parser
    @staticmethod
    def _jmany_parser(db, fieldname, tablename):
rmodel = db[tablename]._model_
pks = (rmodel.primary_keys or ['id'])
ext = (lambda row: (tuple((row[pk] for pk in pks)) if (len(pks) > 1) else row[pks[0]]))
def parser(rowmap, inclusions, row, rid):
inclusions[rid][fieldname][ext(row[tablename])] = inclusions[rid][fieldname].get(ext(row[tablename]), row[tablename])
return parser
    @staticmethod
    def _lbelong_parser(db, fieldname, tablename):
rmodel = db[tablename]._model_
pks = (rmodel.primary_keys or ['id'])
check = (lambda row: all((row[pk] for pk in pks)))
def parser(rowmap, inclusions, row, rid):
if (not check(row[tablename])):
return
rowmap[rid][fieldname] = typed_row_reference_from_record(row[tablename], rmodel)
return parser
    @staticmethod
    def _lone_parser(db, fieldname, tablename):
rmodel = db[tablename]._model_
pks = (rmodel.primary_keys or ['id'])
check = (lambda row: all((row[pk] for pk in pks)))
def parser(rowmap, inclusions, row, rid):
if (not check(row[tablename])):
return
rowmap[rid][fieldname]._cached_resultset = row[tablename]
return parser
    @staticmethod
    def _lmany_parser(db, fieldname, tablename):
rmodel = db[tablename]._model_
pks = (rmodel.primary_keys or ['id'])
ext = (lambda row: (tuple((row[pk] for pk in pks)) if (len(pks) > 1) else row[pks[0]]))
check = (lambda row: all((row[pk] for pk in pks)))
def parser(rowmap, inclusions, row, rid):
if (not check(row[tablename])):
return
inclusions[rid][fieldname][ext(row[tablename])] = inclusions[rid][fieldname].get(ext(row[tablename]), row[tablename])
return parser |
class MongoQueryConfig(QueryConfig[MongoStatement]):
def generate_query(self, input_data: Dict[(str, List[Any])], policy: Optional[Policy]=None) -> Optional[MongoStatement]:
def transform_query_pairs(pairs: Dict[(str, Any)]) -> Dict[(str, Any)]:
if (len(pairs) < 2):
return pairs
return {'$or': [dict([(k, v)]) for (k, v) in pairs.items()]}
if input_data:
filtered_data: Dict[(str, Any)] = self.node.typed_filtered_values(input_data)
if filtered_data:
query_pairs = {}
for (string_field_path, data) in filtered_data.items():
if (len(data) == 1):
query_pairs[string_field_path] = data[0]
elif (len(data) > 1):
query_pairs[string_field_path] = {'$in': data}
field_list = {field_path.string_path: 1 for (field_path, field) in self.top_level_field_map().items()}
(query_fields, return_fields) = (transform_query_pairs(query_pairs), field_list)
return (query_fields, return_fields)
logger.warning('There is not enough data to generate a valid query for {}', self.node.address)
return None
def generate_update_stmt(self, row: Row, policy: Policy, request: PrivacyRequest) -> Optional[MongoStatement]:
update_clauses = self.update_value_map(row, policy, request)
pk_clauses: Dict[(str, Any)] = filter_nonempty_values({field_path.string_path: field.cast(row[field_path.string_path]) for (field_path, field) in self.primary_key_field_paths.items()})
valid = ((len(pk_clauses) > 0) and (len(update_clauses) > 0))
if (not valid):
logger.warning('There is not enough data to generate a valid update for {}', self.node.address)
return None
return (pk_clauses, {'$set': update_clauses})
def query_to_str(self, t: MongoStatement, input_data: Dict[(str, List[Any])]) -> str:
(query_data, field_list) = t
db_name = self.node.address.dataset
collection_name = self.node.address.collection
return f'db.{db_name}.{collection_name}.find({query_data}, {field_list})'
def dry_run_query(self) -> Optional[str]:
data = self.display_query_data()
mongo_query = self.generate_query(self.display_query_data(), None)
if (mongo_query is not None):
return self.query_to_str(mongo_query, data)
return None |
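# Illustration of the transform_query_pairs rule above: a single pair passes
# through unchanged, while {'a': 1, 'b': 2} becomes
# {'$or': [{'a': 1}, {'b': 2}]}, so multi-field matches are OR-ed rather than
# AND-ed in the generated MongoDB find() filter.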
class GenericOefSearchHandler(Handler):
SUPPORTED_PROTOCOL = OefSearchMessage.protocol_id
    def setup(self) -> None:
        """Implement the setup."""
def handle(self, message: Message) -> None:
oef_search_msg = cast(OefSearchMessage, message)
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_dialogue = cast(Optional[OefSearchDialogue], oef_search_dialogues.update(oef_search_msg))
if (oef_search_dialogue is None):
self._handle_unidentified_dialogue(oef_search_msg)
return
if (oef_search_msg.performative is OefSearchMessage.Performative.OEF_ERROR):
self._handle_error(oef_search_msg, oef_search_dialogue)
elif (oef_search_msg.performative is OefSearchMessage.Performative.SEARCH_RESULT):
self._handle_search(oef_search_msg, oef_search_dialogue)
else:
self._handle_invalid(oef_search_msg, oef_search_dialogue)
    def teardown(self) -> None:
        """Implement the teardown."""
def _handle_unidentified_dialogue(self, oef_search_msg: OefSearchMessage) -> None:
self.context.logger.info('received invalid oef_search message={}, unidentified dialogue.'.format(oef_search_msg))
def _handle_error(self, oef_search_msg: OefSearchMessage, oef_search_dialogue: OefSearchDialogue) -> None:
self.context.logger.info('received oef_search error message={} in dialogue={}.'.format(oef_search_msg, oef_search_dialogue))
def _handle_search(self, oef_search_msg: OefSearchMessage, oef_search_dialogue: OefSearchDialogue) -> None:
if (len(oef_search_msg.agents) == 0):
self.context.logger.info(f'found no agents in dialogue={oef_search_dialogue}, continue searching.')
return
strategy = cast(GenericStrategy, self.context.strategy)
if strategy.is_stop_searching_on_result:
self.context.logger.info('found agents={}, stopping search.'.format(list(map((lambda x: x[(- 5):]), oef_search_msg.agents))))
strategy.is_searching = False
else:
self.context.logger.info('found agents={}.'.format(list(map((lambda x: x[(- 5):]), oef_search_msg.agents))))
query = strategy.get_service_query()
fipa_dialogues = cast(FipaDialogues, self.context.fipa_dialogues)
counterparties = strategy.get_acceptable_counterparties(oef_search_msg.agents)
for counterparty in counterparties:
(cfp_msg, _) = fipa_dialogues.create(counterparty=counterparty, performative=FipaMessage.Performative.CFP, query=query)
self.context.outbox.put_message(message=cfp_msg)
self.context.logger.info('sending CFP to agent={}'.format(counterparty[(- 5):]))
def _handle_invalid(self, oef_search_msg: OefSearchMessage, oef_search_dialogue: OefSearchDialogue) -> None:
self.context.logger.warning('cannot handle oef_search message of performative={} in dialogue={}.'.format(oef_search_msg.performative, oef_search_dialogue)) |
class SentenceCount(GeneratedFeature):
column_name: str
def __init__(self, column_name: str, display_name: Optional[str]=None):
self.column_name = column_name
self.display_name = display_name
super().__init__()
def generate_feature(self, data: pd.DataFrame, data_definition: DataDefinition) -> pd.DataFrame:
def sentence_count_f(s):
if ((s is None) or (isinstance(s, float) and np.isnan(s))):
return 0
number = len(re.split('(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s', s))
return max(1, number)
return pd.DataFrame(dict([(self.column_name, data[self.column_name].apply(sentence_count_f))]))
def feature_name(self) -> ColumnName:
return additional_feature(self, self.column_name, (self.display_name or f'Sentence Count for {self.column_name}')) |
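# Quick illustration of the splitting regex above (not part of the original
# source): 'Mr. Smith arrived. He sat down.' counts as 2 sentences, because the
# (?<![A-Z][a-z]\.) look-behind suppresses a split after the 'Mr.' abbreviation
# while the split after 'arrived.' still fires.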
class flow_removed(message):
version = 3
type = 11
def __init__(self, xid=None, cookie=None, priority=None, reason=None, table_id=None, duration_sec=None, duration_nsec=None, idle_timeout=None, hard_timeout=None, packet_count=None, byte_count=None, match=None):
if (xid != None):
self.xid = xid
else:
self.xid = None
if (cookie != None):
self.cookie = cookie
else:
self.cookie = 0
if (priority != None):
self.priority = priority
else:
self.priority = 0
if (reason != None):
self.reason = reason
else:
self.reason = 0
if (table_id != None):
self.table_id = table_id
else:
self.table_id = 0
if (duration_sec != None):
self.duration_sec = duration_sec
else:
self.duration_sec = 0
if (duration_nsec != None):
self.duration_nsec = duration_nsec
else:
self.duration_nsec = 0
if (idle_timeout != None):
self.idle_timeout = idle_timeout
else:
self.idle_timeout = 0
if (hard_timeout != None):
self.hard_timeout = hard_timeout
else:
self.hard_timeout = 0
if (packet_count != None):
self.packet_count = packet_count
else:
self.packet_count = 0
if (byte_count != None):
self.byte_count = byte_count
else:
self.byte_count = 0
if (match != None):
self.match = match
else:
self.match = ofp.match()
return
def pack(self):
packed = []
packed.append(struct.pack('!B', self.version))
packed.append(struct.pack('!B', self.type))
packed.append(struct.pack('!H', 0))
packed.append(struct.pack('!L', self.xid))
packed.append(struct.pack('!Q', self.cookie))
packed.append(struct.pack('!H', self.priority))
packed.append(struct.pack('!B', self.reason))
packed.append(struct.pack('!B', self.table_id))
packed.append(struct.pack('!L', self.duration_sec))
packed.append(struct.pack('!L', self.duration_nsec))
packed.append(struct.pack('!H', self.idle_timeout))
packed.append(struct.pack('!H', self.hard_timeout))
packed.append(struct.pack('!Q', self.packet_count))
packed.append(struct.pack('!Q', self.byte_count))
packed.append(self.match.pack())
length = sum([len(x) for x in packed])
packed[2] = struct.pack('!H', length)
        return b''.join(packed)  # struct.pack returns bytes, so join on a bytes separator
    @staticmethod
    def unpack(reader):
obj = flow_removed()
_version = reader.read('!B')[0]
assert (_version == 3)
_type = reader.read('!B')[0]
assert (_type == 11)
_length = reader.read('!H')[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read('!L')[0]
obj.cookie = reader.read('!Q')[0]
obj.priority = reader.read('!H')[0]
obj.reason = reader.read('!B')[0]
obj.table_id = reader.read('!B')[0]
obj.duration_sec = reader.read('!L')[0]
obj.duration_nsec = reader.read('!L')[0]
obj.idle_timeout = reader.read('!H')[0]
obj.hard_timeout = reader.read('!H')[0]
obj.packet_count = reader.read('!Q')[0]
obj.byte_count = reader.read('!Q')[0]
obj.match = ofp.match.unpack(reader)
return obj
def __eq__(self, other):
if (type(self) != type(other)):
return False
if (self.xid != other.xid):
return False
if (self.cookie != other.cookie):
return False
if (self.priority != other.priority):
return False
if (self.reason != other.reason):
return False
if (self.table_id != other.table_id):
return False
if (self.duration_sec != other.duration_sec):
return False
if (self.duration_nsec != other.duration_nsec):
return False
if (self.idle_timeout != other.idle_timeout):
return False
if (self.hard_timeout != other.hard_timeout):
return False
if (self.packet_count != other.packet_count):
return False
if (self.byte_count != other.byte_count):
return False
if (self.match != other.match):
return False
return True
def pretty_print(self, q):
q.text('flow_removed {')
with q.group():
with q.indent(2):
q.breakable()
q.text('xid = ')
if (self.xid != None):
q.text(('%#x' % self.xid))
else:
q.text('None')
q.text(',')
q.breakable()
q.text('cookie = ')
q.text(('%#x' % self.cookie))
q.text(',')
q.breakable()
q.text('priority = ')
q.text(('%#x' % self.priority))
q.text(',')
q.breakable()
q.text('reason = ')
value_name_map = {0: 'OFPRR_IDLE_TIMEOUT', 1: 'OFPRR_HARD_TIMEOUT', 2: 'OFPRR_DELETE', 3: 'OFPRR_GROUP_DELETE'}
if (self.reason in value_name_map):
q.text(('%s(%d)' % (value_name_map[self.reason], self.reason)))
else:
q.text(('%#x' % self.reason))
q.text(',')
q.breakable()
q.text('table_id = ')
q.text(('%#x' % self.table_id))
q.text(',')
q.breakable()
q.text('duration_sec = ')
q.text(('%#x' % self.duration_sec))
q.text(',')
q.breakable()
q.text('duration_nsec = ')
q.text(('%#x' % self.duration_nsec))
q.text(',')
q.breakable()
q.text('idle_timeout = ')
q.text(('%#x' % self.idle_timeout))
q.text(',')
q.breakable()
q.text('hard_timeout = ')
q.text(('%#x' % self.hard_timeout))
q.text(',')
q.breakable()
q.text('packet_count = ')
q.text(('%#x' % self.packet_count))
q.text(',')
q.breakable()
q.text('byte_count = ')
q.text(('%#x' % self.byte_count))
q.text(',')
q.breakable()
q.text('match = ')
q.pp(self.match)
q.breakable()
q.text('}') |
@pytest.mark.usefixtures('use_tmpdir')
def test_that_hook_workflow_without_existing_job_error_is_located():
assert_that_config_leads_to_error(config_file_contents=dedent('\nNUM_REALIZATIONS 1\nHOOK_WORKFLOW NO_SUCH_JOB POST_SIMULATION\n '), expected_error=ExpectedErrorInfo(line=3, column=15, end_column=26)) |
def get_example_tree():
nst1 = NodeStyle()
nst1['bgcolor'] = 'LightSteelBlue'
nst2 = NodeStyle()
nst2['bgcolor'] = 'Moccasin'
nst3 = NodeStyle()
nst3['bgcolor'] = 'DarkSeaGreen'
nst4 = NodeStyle()
nst4['bgcolor'] = 'Khaki'
t = Tree('((((a1,a2),a3), ((b1,b2),(b3,b4))), ((c1,c2),c3));')
for n in t.traverse():
n.dist = 0
n1 = t.common_ancestor(['a1', 'a2', 'a3'])
n1.set_style(nst1)
n2 = t.common_ancestor(['b1', 'b2', 'b3', 'b4'])
n2.set_style(nst2)
n3 = t.common_ancestor(['c1', 'c2', 'c3'])
n3.set_style(nst3)
n4 = t.common_ancestor(['b3', 'b4'])
n4.set_style(nst4)
ts = TreeStyle()
ts.layout_fn = layout
ts.show_leaf_name = False
ts.mode = 'c'
ts.root_opening_factor = 1
return (t, ts) |
class TestPopupSave(unittest.TestCase):
def _make_mock_file_dialog(self, return_value):
m = Mock(spec=FileDialog)
m.open.return_value = return_value
m.path = 'mock'
return m
    @unittest.skipIf((ETSConfig.toolkit == 'null'), 'Test meaningless with null toolkit.')
def test_popup_save_with_user_ok(self):
with patch('pyface.api.FileDialog') as fd:
fd.return_value = self._make_mock_file_dialog(OK)
from tvtk.pyface.utils import popup_save
x = popup_save()
self.assertEqual(x, 'mock')
    @unittest.skipIf((ETSConfig.toolkit == 'null'), 'Test meaningless with null toolkit.')
def test_popup_save_with_user_not_ok(self):
with patch('pyface.api.FileDialog') as fd:
fd.return_value = self._make_mock_file_dialog(NO)
from tvtk.pyface.utils import popup_save
x = popup_save()
self.assertEqual(x, '') |
def test_add_after_reset(app_instance, mocker):
mocker.patch('embedchain.vectordb.chroma.chromadb.Client')
config = AppConfig(log_level='DEBUG', collect_metrics=False)
chroma_config = ChromaDbConfig(allow_reset=True)
db = ChromaDB(config=chroma_config)
app_instance = App(config=config, db=db)
mocker.patch.object(ChatHistory, 'delete', autospec=True)
app_instance.reset()
app_instance.db.client.heartbeat()
mocker.patch.object(Collection, 'add')
app_instance.db.collection.add(embeddings=[[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2]], metadatas=[{'chapter': '3', 'verse': '16'}, {'chapter': '3', 'verse': '5'}, {'chapter': '29', 'verse': '11'}], ids=['id1', 'id2', 'id3'])
app_instance.reset() |
def ffmpeg_install_windows():
try:
        ffmpeg_url = ''  # the original download URL (an ffmpeg 6.0 full-build zip, per the folder name below) was stripped from the source
ffmpeg_zip_filename = 'ffmpeg.zip'
ffmpeg_extracted_folder = 'ffmpeg'
if os.path.exists(ffmpeg_zip_filename):
os.remove(ffmpeg_zip_filename)
r = requests.get(ffmpeg_url)
with open(ffmpeg_zip_filename, 'wb') as f:
f.write(r.content)
if os.path.exists(ffmpeg_extracted_folder):
for (root, dirs, files) in os.walk(ffmpeg_extracted_folder, topdown=False):
for file in files:
os.remove(os.path.join(root, file))
for dir in dirs:
os.rmdir(os.path.join(root, dir))
os.rmdir(ffmpeg_extracted_folder)
with zipfile.ZipFile(ffmpeg_zip_filename, 'r') as zip_ref:
zip_ref.extractall()
os.remove('ffmpeg.zip')
os.rename(f'{ffmpeg_extracted_folder}-6.0-full_build', ffmpeg_extracted_folder)
for file in os.listdir(os.path.join(ffmpeg_extracted_folder, 'bin')):
os.rename(os.path.join(ffmpeg_extracted_folder, 'bin', file), os.path.join('.', file))
os.rmdir(os.path.join(ffmpeg_extracted_folder, 'bin'))
for file in os.listdir(os.path.join(ffmpeg_extracted_folder, 'doc')):
os.remove(os.path.join(ffmpeg_extracted_folder, 'doc', file))
for file in os.listdir(os.path.join(ffmpeg_extracted_folder, 'presets')):
os.remove(os.path.join(ffmpeg_extracted_folder, 'presets', file))
os.rmdir(os.path.join(ffmpeg_extracted_folder, 'presets'))
os.rmdir(os.path.join(ffmpeg_extracted_folder, 'doc'))
os.remove(os.path.join(ffmpeg_extracted_folder, 'LICENSE'))
os.remove(os.path.join(ffmpeg_extracted_folder, 'README.txt'))
os.rmdir(ffmpeg_extracted_folder)
print('FFmpeg installed successfully! Please restart your computer and then re-run the program.')
except Exception as e:
print('An error occurred while trying to install FFmpeg. Please try again. Otherwise, please install FFmpeg manually and try again.')
print(e)
exit() |
def CreateDataset(opt):
dataset = None
if ((opt.name == 'fashion') or (opt.name == 'humanparsing')):
from data.pickle_dataset import PickleDataset
dataset = PickleDataset()
print(('dataset [%s] was created' % dataset.name()))
dataset.initialize(opt)
return dataset |
class LogsGateway(AWSGateway):
def __init__(self, region: str, access_key_id: Optional[str]=None, access_key_data: Optional[str]=None, config: Optional[Dict[(str, Any)]]=None) -> None:
super().__init__(region, access_key_id, access_key_data, config)
self.client: botocore.client.BaseClient = boto3.client('logs', region_name=self.region, **self.config)
def describe_log_group(self, log_group_name: str) -> Optional[LogGroup]:
response = self.client.describe_log_groups(logGroupNamePrefix=log_group_name)
for group in response['logGroups']:
if (group['logGroupName'] == log_group_name):
log_group = LogGroup(log_group_name=group['logGroupName'])
return log_group |
def checkOrigins(tdb, cmdenv, calc):
if cmdenv.origPlace:
if (cmdenv.startJumps and (cmdenv.startJumps > 0)):
cmdenv.origins = expandForJumps(tdb, cmdenv, calc, cmdenv.origPlace.system, cmdenv.startJumps, '--from', 'starting')
cmdenv.origPlace = None
elif isinstance(cmdenv.origPlace, System):
cmdenv.DEBUG0('origPlace: System: {}', cmdenv.origPlace.name())
if (not cmdenv.origPlace.stations):
raise CommandLineError('No stations at --from system, {}'.format(cmdenv.origPlace.name()))
cmdenv.origins = tuple((station for station in cmdenv.origPlace.stations if checkStationSuitability(cmdenv, calc, station)))
else:
cmdenv.DEBUG0('origPlace: Station: {}', cmdenv.origPlace.name())
checkStationSuitability(cmdenv, calc, cmdenv.origPlace, '--from')
cmdenv.origins = (cmdenv.origPlace,)
cmdenv.startStation = cmdenv.origPlace
checkForEmptyStationList('--from', cmdenv.origPlace, cmdenv.origins, cmdenv.startJumps)
else:
if cmdenv.startJumps:
raise CommandLineError('--start-jumps (-s) only works with --from')
cmdenv.DEBUG0('using all suitable origins')
cmdenv.origins = tuple((station for station in tdb.stationByID.values() if checkStationSuitability(cmdenv, calc, station)))
if ((not cmdenv.startJumps) and isinstance(cmdenv.origPlace, System)):
cmdenv.origins = filterStationSet('--from', cmdenv, calc, cmdenv.origins)
cmdenv.origSystems = tuple(set((stn.system for stn in cmdenv.origins))) |
def run(args):
from .. import Tree, PhyloTree
features = set()
for ftree in src_tree_iterator(args):
if args.ncbi:
tree = PhyloTree(open(ftree))
features.update(['taxid', 'name', 'rank', 'bgcolor', 'sci_name', 'collapse_subspecies', 'named_lineage', 'lineage'])
tree.annotate_ncbi_taxa(args.taxid_attr)
else:
tree = Tree(open(ftree))
type2cast = {'str': str, 'int': int, 'float': float, 'set': set, 'list': list}
for annotation in args.feature:
(aname, asource, amultiple, acast) = (None, None, False, str)
for field in annotation:
try:
(key, value) = [_f.strip() for _f in field.split(':')]
except Exception:
raise ValueError(('Invalid feature option [%s]' % field))
if (key == 'name'):
aname = value
elif (key == 'source'):
asource = value
elif (key == 'multiple'):
amultiple = value
elif (key == 'type'):
try:
acast = type2cast[value]
except KeyError:
raise ValueError(('Invalid feature type [%s]' % field))
else:
raise ValueError(('Unknown feature option [%s]' % field))
if ((not aname) and (not asource)):
                raise ValueError(('name and source are required when annotating a new feature [%s]' % annotation))
features.add(aname)
for line in open(asource, 'r'):
line = line.strip()
if ((not line) or line.startswith('#')):
continue
(nodenames, attr_value) = [_ln.strip() for _ln in line.split('\t')]
nodenames = list(map(str.strip, nodenames.split(',')))
relaxed_grouping = True
if nodenames[0].startswith('!'):
relaxed_grouping = False
nodenames[0] = nodenames[0][1:]
if (len(nodenames) > 1):
target_node = tree.get_common_ancestor(nodenames)
if (not relaxed_grouping):
pass
else:
target_node = (tree & nodenames[0])
if hasattr(target_node, aname):
                log.warning(('Overwriting annotation for node [%s]' % nodenames))
else:
target_node.add_property(aname, acast(attr_value))
dump(tree, properties=features) |
def start_parse(save_data: bytes, country_code: str) -> dict[(str, Any)]:
try:
save_stats = parse_save(save_data, country_code)
except Exception:
        helper.colored_text('\nError: An error has occurred while parsing your save data:', base=helper.RED)
        traceback.print_exc()
        game_version = get_game_version(save_data)
        if (game_version < 110000):
            helper.colored_text(f'\nThis save is from before &11.0.0& (current save version is &{helper.gv_to_str(game_version)}&), so this is likely the cause of the issue. &The save editor is not designed to work with saves from before 11.0.0&')
else:
helper.colored_text('\nPlease report this to &#bug-reports&, and/or &dm me your save& on discord')
helper.exit_editor()
return {}
return save_stats |
def test_function_score_with_single_function():
d = {'function_score': {'filter': {'term': {'tags': 'python'}}, 'script_score': {'script': "doc['comment_count'] * _score"}}}
q = query.Q(d)
assert isinstance(q, query.FunctionScore)
assert isinstance(q.filter, query.Term)
assert (len(q.functions) == 1)
sf = q.functions[0]
assert isinstance(sf, function.ScriptScore)
assert ("doc['comment_count'] * _score" == sf.script) |
def send_from_location_address(subject, text_content, html_content, recipient, location):
mailgun_data = {'from': location.from_email(), 'to': [recipient], 'subject': subject, 'text': text_content}
if html_content:
mailgun_data['html'] = html_content
return mailgun_send(mailgun_data) |
def handle_created_object_record(record: dict, cfg: Config) -> None:
logger.debug({'s3_notification_event': record})
cloudtrail_log_record = get_cloudtrail_log_records(record)
if cloudtrail_log_record:
for cloudtrail_log_event in cloudtrail_log_record['events']:
handle_event(event=cloudtrail_log_event, source_file_object_key=cloudtrail_log_record['key'], rules=cfg.rules, ignore_rules=cfg.ignore_rules) |
def extractSherleyhimechamaWordpressCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
def test(config, json_config_file_1):
config.from_json(json_config_file_1)
assert (config() == {'section1': {'value1': 1}, 'section2': {'value2': 2}})
assert (config.section1() == {'value1': 1})
assert (config.section1.value1() == 1)
assert (config.section2() == {'value2': 2})
assert (config.section2.value2() == 2) |
def hsva_to_rgba(h_, s, v, a):
(r, g, b, a) = (v, v, v, a)
h = (h_ * 360.0)
if (s < 0.0001):
return (r, g, b, a)
hue_slice_index = int((h / 60.0))
hue_partial = ((h / 60.0) - hue_slice_index)
p = (v * (1 - s))
q = (v * (1 - (hue_partial * s)))
t = (v * (1 - ((1 - hue_partial) * s)))
if (0 == hue_slice_index):
(r, g, b) = (v, t, p)
elif (1 == hue_slice_index):
(r, g, b) = (q, v, p)
elif (2 == hue_slice_index):
(r, g, b) = (p, v, t)
elif (3 == hue_slice_index):
(r, g, b) = (p, q, v)
elif (4 == hue_slice_index):
(r, g, b) = (t, p, v)
elif (5 == hue_slice_index):
(r, g, b) = (v, p, q)
return (r, g, b, a) |
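# Sanity checks for the conversion above (a sketch, not from the original
# source): hue is given in [0, 1) and scaled to degrees internally.
def _demo_hsva_to_rgba():
    assert hsva_to_rgba(0.0, 1.0, 1.0, 1.0) == (1.0, 0.0, 0.0, 1.0)  # pure red
    assert hsva_to_rgba(0.5, 0.0, 0.7, 1.0) == (0.7, 0.7, 0.7, 1.0)  # zero saturation -> grey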
class OptionPlotoptionsScatterSonificationTracksMappingTime(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class FundNameGenerator():
    @_metrics.timeit
def __init__(self, random_state):
self.random_state = random_state
    @_metrics.timeit
def make(self, legal_entity):
provider = self.get_fund_provider(legal_entity)
region = self.get_geographic_region()
asset = self.get_asset()
asset_class = self.get_class()
divi = self.get_dividend_type()
return f'{provider} {region} {asset} {asset_class} {divi}'
def get_geographic_region(self):
return self.random_state.choice(REGIONS).strip()
def get_fund_provider(self, legal_entity):
parts = re.split('\\s+|[,;.-]\\s*', legal_entity)
if (len(parts) > 1):
return make_name_abbreviation(parts)
return legal_entity.split(' ')[0].strip()
def get_asset(self):
return self.random_state.choice(ASSET_CLASS).strip()
def get_class(self):
return self.random_state.choice(CLASS).strip()
def get_dividend_type(self):
return self.random_state.choice(DIVIDEND_TREATMENT).strip() |
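# Hedged usage note: make_name_abbreviation and the REGIONS/ASSET_CLASS/CLASS/
# DIVIDEND_TREATMENT tables come from the surrounding module. A multi-word
# legal entity such as 'Acme Capital Partners' is abbreviated via
# make_name_abbreviation, while a single-word entity is used verbatim as the
# provider part of the generated fund name.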
def plot_data(data: Data, datetime_data: Optional[Data], target_data: Optional[Data], agg_data: bool, merge_small_categories: Optional[int]=MAX_CATEGORIES) -> Tuple[(Optional[Histogram], Optional[DataInTime], Optional[DataByTarget])]:
(column_name, column_type, current_data, reference_data) = data
if (column_type == ColumnType.Categorical):
(current_data, reference_data) = relabel_data(current_data, reference_data, merge_small_categories)
else:
current_data = current_data.copy()
if (reference_data is not None):
reference_data = reference_data.copy()
current_data.dropna(inplace=True)
if (reference_data is not None):
reference_data.dropna(inplace=True)
data_hist: Optional[Histogram]
if (column_type == ColumnType.Numerical):
data_hist = make_hist_for_num_plot(current_data, reference_data, calculate_log=True)
elif (column_type == ColumnType.Categorical):
data_hist = make_hist_for_cat_plot(current_data, reference_data, dropna=True)
elif (column_type == ColumnType.Datetime):
(prefix, freq) = choose_agg_period(current_data, reference_data)
curr_data = current_data.dt.to_period(freq=freq).value_counts().reset_index()
curr_data.columns = ['x', 'number_of_items']
curr_data['x'] = curr_data['x'].dt.to_timestamp()
reference = None
if (reference_data is not None):
ref_data = reference_data.dt.to_period(freq=freq).value_counts().reset_index()
ref_data.columns = ['x', 'number_of_items']
ref_data['x'] = ref_data['x'].dt.to_timestamp()
max_ref_date = ref_data['x'].max()
min_curr_date = curr_data['x'].min()
if (max_ref_date == min_curr_date):
(curr_data, ref_data) = _split_periods(curr_data, ref_data, 'x')
reference = ref_data
reference.columns = ['x', 'count']
curr_data.columns = ['x', 'count']
data_hist = Histogram(current=HistogramData.from_df(curr_data), reference=(HistogramData.from_df(reference) if (reference is not None) else None))
elif (column_type == ColumnType.Text):
data_hist = None
else:
raise ValueError(f'Unsupported column type {column_type}')
data_in_time: Optional[DataInTime] = None
if (datetime_data is not None):
(datetime_name, _, datetime_current, datetime_reference) = datetime_data
if (column_type == ColumnType.Numerical):
(df_for_time_plot_curr, df_for_time_plot_ref, prefix) = prepare_data_for_date_num(datetime_current, datetime_reference, datetime_name, column_name, current_data, reference_data)
data_in_time = DataInTime(data_for_plots=DataInTimePlots(current=df_for_time_plot_curr, reference=df_for_time_plot_ref), freq=prefix, datetime_name=datetime_name)
if (column_type == ColumnType.Categorical):
(df_for_time_plot_curr, df_for_time_plot_ref, prefix) = prepare_data_for_date_cat(datetime_current, datetime_reference, datetime_name, column_name, current_data, reference_data)
data_in_time = DataInTime(data_for_plots=DataInTimePlots(current=df_for_time_plot_curr, reference=df_for_time_plot_ref), freq=prefix, datetime_name=datetime_name)
data_by_target: Optional[DataByTarget] = None
if (target_data is not None):
(target_name, target_type, target_current, target_reference) = target_data
curr_df = pd.DataFrame({column_name: current_data, target_name: target_current})
ref_df = None
if ((target_reference is not None) and (reference_data is not None)):
ref_df = pd.DataFrame({column_name: reference_data, target_name: target_reference})
if ((column_type == ColumnType.Categorical) and (target_type == ColumnType.Numerical)):
data_by_target = DataByTarget(box_data=prepare_box_data(curr_df, ref_df, column_name, target_name), target_name=target_name, target_type=target_type.value)
if ((column_type == ColumnType.Numerical) and (target_type == ColumnType.Categorical)):
data_by_target = DataByTarget(box_data=prepare_box_data(curr_df, ref_df, target_name, column_name), target_name=target_name, target_type=target_type.value)
if ((column_type == ColumnType.Numerical) and (target_type == ColumnType.Numerical)):
if ((target_reference is not None) and (reference_data is not None)):
target_ref = target_reference.loc[reference_data.index]
else:
target_ref = None
(raw_plot, agg_plot) = get_data_for_num_num_plot(agg_data, column_name, target_name, current_data, target_current.loc[current_data.index], reference_data, target_ref)
data_by_target = DataByTarget(scatter_data=raw_plot, contour_data=agg_plot, target_name=target_name, target_type=target_type.value)
if ((column_type == ColumnType.Categorical) and (target_type == ColumnType.Categorical)):
(target_current_, target_reference_) = relabel_data(target_current, target_reference, merge_small_categories)
result = get_data_for_cat_cat_plot(column_name, target_name, current_data, target_current_, reference_data, target_reference_)
data_by_target = DataByTarget(count_data=result, target_name=target_name, target_type=target_type.value)
return (data_hist, data_in_time, data_by_target) |
@pytest.mark.parametrize('_func', [['div'], ['truediv'], ['floordiv'], ['mod']])
def test_error_when_division_by_zero_and_fill_value_is_none(_func, df_vartypes):
df_zero = df_vartypes.copy()
df_zero.loc[(1, 'Marks')] = 0
transformer = RelativeFeatures(variables=['Age'], reference=['Marks'], func=_func)
transformer.fit(df_vartypes)
with pytest.raises(ValueError) as record:
transformer.transform(df_zero)
msg = 'Some of the reference variables contain zeroes. Division by zero does not exist. Replace zeros before using this transformer for division or set `fill_value` to a number.'
assert (str(record.value) == msg) |
def inv_fft_at_point(vals, modulus, root_of_unity, x):
if (len(vals) == 1):
return vals[0]
half = ((modulus + 1) // 2)
inv_root = pow(root_of_unity, (len(vals) - 1), modulus)
f_of_minus_x_vals = (vals[(len(vals) // 2):] + vals[:(len(vals) // 2)])
evens = [(((f + g) * half) % modulus) for (f, g) in zip(vals, f_of_minus_x_vals)]
odds = [(((f - g) * half) % modulus) for (f, g) in zip(vals, f_of_minus_x_vals)]
comb = [((((o * x) * (inv_root ** i)) + e) % modulus) for (i, (o, e)) in enumerate(zip(odds, evens))]
return inv_fft_at_point(comb[:(len(comb) // 2)], modulus, ((root_of_unity ** 2) % modulus), ((x ** 2) % modulus)) |
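# Hedged sanity check (parameters assumed, not from the original source): in
# GF(337), 85 is a primitive 8th root of unity. Evaluating a known polynomial
# at the powers of the root gives its FFT, and inv_fft_at_point should then
# recover the polynomial's value at an arbitrary point.
def _demo_inv_fft_at_point():
    (modulus, root) = (337, 85)
    coeffs = [3, 1, 4, 1, 5, 9, 2, 6]
    evaluate = (lambda x: (sum(((c * pow(x, e, modulus)) for (e, c) in enumerate(coeffs))) % modulus))
    vals = [evaluate(pow(root, i, modulus)) for i in range(8)]
    assert (inv_fft_at_point(vals, modulus, root, 7) == evaluate(7))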
class OptionSeriesHistogramZones(Options):
    @property
    def className(self):
        return self._config_get(None)
    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)
    @property
    def color(self):
        return self._config_get(None)
    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)
    @property
    def dashStyle(self):
        return self._config_get(None)
    @dashStyle.setter
    def dashStyle(self, text: str):
        self._config(text, js_type=False)
    @property
    def fillColor(self):
        return self._config_get(None)
    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False) |
class GetVersionsByRegexTests(unittest.TestCase):
    @mock.patch('anitya.lib.backends.BaseBackend.call_url')
def test_get_versions_by_regex_not_modified(self, mock_call_url):
mock_response = mock.Mock(spec=object)
mock_response.status_code = 304
mock_call_url.return_value = mock_response
mock_project = mock.Mock()
mock_project.get_time_last_created_version = mock.MagicMock(return_value=None)
versions = backends.get_versions_by_regex('url', 'regex', mock_project)
self.assertEqual(versions, [])
    @mock.patch('anitya.lib.backends.BaseBackend.call_url')
def test_get_versions_by_regex_string_response(self, mock_call_url):
mock_call_url.return_value = ''
mock_project = mock.Mock()
self.assertRaises(AnityaPluginException, backends.get_versions_by_regex, 'url', 'regex', mock_project) |
def output_model_to_output_infer(output_model: dm.OutputModel) -> dm.OutputInfer:
output_json_encoded = json.dumps(output_model.output)
def __define_name(value: Any) -> Dict[(str, Any)]:
if isinstance(value, str):
return {'string_param': value}
if isinstance(value, bool):
return {'bool_param': value}
if isinstance(value, int):
return {'int_param': value}
if isinstance(value, float):
return {'float_param': value}
return {'string_param': str(value)}
parameters = {k: dm.ParameterMessage(**__define_name(v)) for (k, v) in output_model.parameters.items()}
return dm.OutputInfer(shape=output_model.shape, datatype=output_model.datatype, data=dm.InferData(payload_int=output_model.data), output=output_json_encoded, parameters=parameters, error=output_model.error) |
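# Note on the isinstance dispatch above: the bool check must come before the
# int check because isinstance(True, int) is True in Python; reversing the
# order would silently encode booleans as int_param.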
@pytest.mark.skipcomplex
def test_interpolate_vector_valued():
from firedrake.adjoint import ReducedFunctional, Control, taylor_test
mesh = UnitSquareMesh(10, 10)
V1 = VectorFunctionSpace(mesh, 'CG', 1)
V2 = VectorFunctionSpace(mesh, 'DG', 0)
V3 = VectorFunctionSpace(mesh, 'CG', 2)
x = SpatialCoordinate(mesh)
f = interpolate(as_vector(((x[0] * x[1]), (x[0] + x[1]))), V1)
g = interpolate(as_vector(((sin(x[1]) + x[0]), (cos(x[0]) * x[1]))), V2)
u = Function(V3)
u.interpolate(((f * dot(f, g)) - (0.5 * g)))
J = assemble(((inner(f, g) * (u ** 2)) * dx))
rf = ReducedFunctional(J, Control(f))
h = Function(V1)
h.vector()[:] = 1
assert (taylor_test(rf, f, h) > 1.9) |
class ConstantVelocityGaussian3D():
def __init__(self, sigma=(1.0 / 8.0), b=[1.0, 0.0, 0.0], xc=0.25, yc=0.5, zc=0.5):
self.sigma = sigma
self.xc = xc
self.yc = yc
self.zc = zc
self.b = b
def uOfXT(self, x, t):
centerX = ((self.xc + (self.b[0] * t)) % 1.0)
centerY = ((self.yc + (self.b[1] * t)) % 1.0)
centerZ = ((self.zc + (self.b[2] * t)) % 1.0)
d2 = ((((x[0] - centerX) ** 2) + ((x[1] - centerY) ** 2)) + ((x[2] - centerZ) ** 2))
return exp((((- 0.5) * d2) / (self.sigma ** 2))) |
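# Hedged usage sketch (exp is assumed to come from math or numpy in the
# surrounding module): at t=0 the bump is centred at (xc, yc, zc), where the
# value is exp(0) == 1, and the % 1.0 wraps the advected centre around the
# unit cube.
#   g = ConstantVelocityGaussian3D()
#   assert abs(g.uOfXT([0.25, 0.5, 0.5], 0.0) - 1.0) < 1e-12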
def digest_private_key(args):
_check_output_is_not_input(args.keyfile, args.digest_file)
sk = _load_ecdsa_signing_key(args.keyfile)
repr(sk.to_string())
digest = hashlib.sha256()
digest.update(sk.to_string())
result = digest.digest()
if (args.keylen == 192):
result = result[0:24]
args.digest_file.write(result)
print(('SHA-256 digest of private key %s%s written to %s' % (args.keyfile.name, ('' if (args.keylen == 256) else ' (truncated to 192 bits)'), args.digest_file.name))) |
def test_triangle_mixed(mesh_triangle):
V1 = FunctionSpace(mesh_triangle, 'DG', 1)
V2 = FunctionSpace(mesh_triangle, 'RT', 2)
V = (V1 * V2)
f = Function(V)
(f1, f2) = f.subfunctions
x = SpatialCoordinate(mesh_triangle)
f1.interpolate((x[0] + (1.2 * x[1])))
f2.project(as_vector((x[1], (0.8 + x[0]))))
actual = f.at([0.6, 0.4])
assert isinstance(actual, tuple)
assert (len(actual) == 2)
assert np.allclose(1.08, actual[0])
assert np.allclose([0.4, 1.4], actual[1])
actual = f.at([0.6, 0.4], [0.0, 0.9], [0.3, 0.5])
assert (len(actual) == 3)
assert np.allclose(1.08, actual[0][0])
assert np.allclose([0.4, 1.4], actual[0][1])
assert np.allclose(1.08, actual[1][0])
assert np.allclose([0.9, 0.8], actual[1][1])
assert np.allclose(0.9, actual[2][0])
assert np.allclose([0.5, 1.1], actual[2][1]) |
class QAgent(RL_Agent):
def __init__(self, model=None, n_actions=None):
super().__init__()
self.model = model
self.n_actions = n_actions
def update(self, sd):
self.model.load_state_dict(sd)
def initial_state(self, agent_info, B):
return DictTensor({})
def __call__(self, state, observation, agent_info=None, history=None):
B = observation.n_elems()
agent_step = None
q = self.model(observation['frame'])
(qs, action) = q.max(1)
raction = torch.tensor(np.random.randint(low=0, high=self.n_actions, size=action.size()[0]))
epsilon = agent_info['epsilon']
r = torch.rand(action.size()[0])
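        # Epsilon-greedy exploration: with probability epsilon the greedy argmax
        # action is replaced below by the uniformly sampled random action.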
mask = r.lt(epsilon).float()
action = ((mask * raction) + ((1 - mask) * action))
action = action.long()
agent_do = DictTensor({'action': action, 'q': q})
return (agent_do, DictTensor({})) |
class PerfectCoronagraph(OpticalElement):
def __init__(self, aperture, order=2, coeffs=None):
self.pupil_grid = aperture.grid
modes = []
if (coeffs is not None):
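            # Recover the polynomial order from the coefficient count by inverting
            # the triangular-number formula n = (order/2) * (order/2 + 1) / 2.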
order = int((2 * np.ceil((0.5 * (np.sqrt(((8 * len(coeffs)) + 1)) - 1)))))
self.coeffs = coeffs
else:
self.coeffs = np.ones(int(((order * ((order / 2) + 1)) / 4)))
for i in range((order // 2)):
for j in range((i + 1)):
modes.append(((aperture * (self.pupil_grid.x ** j)) * (self.pupil_grid.y ** (i - j))))
self.mode_basis = ModeBasis(modes).orthogonalized
self.transformation = self.mode_basis.transformation_matrix
self.transformation_inverse = inverse_truncated(self.transformation, 1e-06)
def forward(self, wavefront):
wf = wavefront.copy()
correction = np.einsum('kj,j,ji,...i->...k', self.transformation, self.coeffs, self.transformation_inverse, wf.electric_field, optimize='optimal')
wf.electric_field -= correction
return wf
def backward(self, wavefront):
return self.forward(wavefront)
def get_transformation_matrix_forward(self, wavelength=1):
return (np.eye(self.pupil_grid.size) - self.transformation.dot((self.coeffs * self.transformation_inverse)))
def get_transformation_matrix_backward(self, wavelength=1):
return self.get_transformation_matrix_forward(wavelength) |
def __callback_on_warc_completed(warc_path, counter_article_passed, counter_article_discarded, counter_article_error, counter_article_total):
global __counter_article_passed
global __counter_article_discarded
global __counter_article_error
global __counter_article_total
global __counter_warc_processed
elapsed_secs = (time.time() - __start_time)
__counter_article_discarded += counter_article_discarded
__counter_article_error += counter_article_error
__counter_article_passed += counter_article_passed
__counter_article_total += counter_article_total
__counter_warc_processed += 1
sec_per_article = (elapsed_secs / counter_article_total)
h_per_warc = ((elapsed_secs / __counter_warc_processed) / 3600)
remaining_warcs = (__number_of_warc_files_on_cc - (__counter_warc_processed + __counter_warc_skipped))
__logger.info('warc processing statistics')
__logger.info('warc files skipped = %i, processed = %i, remaining = %i, total = %i', __counter_warc_skipped, __counter_warc_processed, remaining_warcs, __number_of_warc_files_on_cc)
__logger.info('global [s/article] = %f', sec_per_article)
__logger.info('global [h/warc] = %.3f', h_per_warc)
__logger.info('estimated remaining time [h] = %f', (remaining_warcs * h_per_warc))
__extern_callback_on_warc_completed(warc_path, __counter_article_passed, __counter_article_discarded, __counter_article_error, __counter_article_total, __counter_warc_processed) |
def extractThepotatoroomCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('non-human sub-district office', 'non-human sub-district office', 'translated'), ('as the demon king, i am very distressed because the hero is too weak 1.0', 'as the demon king, i am very distressed because the hero is too weak 1.0', 'translated'), ("please respect the occupation 'evil spirit'", "please respect the occupation 'evil spirit'", 'translated'), ("there's something wrong with this development!", "there's something wrong with this development!", 'translated'), ('insider', 'Insider', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
def display(image, server_name=':0'):
if (not isinstance(image, Image)):
raise TypeError(('image must be a wand.image.Image instance, not ' + repr(image)))
system = platform.system()
if (system == 'Windows'):
try:
image.save(filename='win:.')
except DelegateError:
pass
else:
return
if (system in ('Windows', 'Darwin')):
ext = image.format.lower()
if (ext in ('miff', 'xc')):
ext = 'png'
path = tempfile.mktemp(suffix=('.' + ext))
image.save(filename=path)
os.system((('start ' if (system == 'Windows') else 'open ') + path))
else:
library.MagickDisplayImage.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
library.MagickDisplayImage(image.wand, str(server_name).encode()) |
def make_render_children(separator: str) -> Render:
def render_children(node: RenderTreeNode, context: RenderContext) -> str:
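        # Closure over `separator`: render every child and join the non-empty outputs.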
render_outputs = (child.render(context) for child in node.children)
return separator.join((out for out in render_outputs if out))
return render_children |
class ExecutionTrace():
def __init__(self, json):
self.nodes = {}
self.clean_nodes = {}
self.tensors = {}
self.proc_group = {}
self.iteration_ids = []
self.schema: str = json['schema']
pid = json['pid']
self.proc_group = {pid: {}}
nodes_list = json['nodes']
node_creation_func = {'1.0.1': ExecutionTrace._create_node_v1_0_1, '1.0.2-chakra.0.0.4': ExecutionTrace._create_node_v1_0_2_chakra_0_0_4}
create_node = node_creation_func.get(self.schema, None)
if (create_node is None):
raise ValueError(f'No corresponding node creation function found for schema version {self.schema}')
for x in nodes_list:
id = x['id']
self.nodes[id] = create_node(pid, x)
input_tensors = self.nodes[id].get_input_tensors()
output_tensors = self.nodes[id].get_output_tensors()
if (x['name'] == '__ROOT_THREAD__'):
tid = self.nodes[id].tid
self.proc_group[pid][tid] = id
for (t_type, t_id, shape) in input_tensors:
if (type(t_id) != tuple):
t_id = tuple(t_id)
if (t_id not in self.tensors):
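                    # t_type is formatted like 'Tensor(dtype)'; the slice below strips
                    # the 7-character prefix and the trailing ')'.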
dtype = t_type[7:(- 1)]
self.tensors[t_id] = TensorNode(t_id, dtype)
self.tensors[t_id].add_sink(id)
self.tensors[t_id].add_shape(shape)
for (t_type, t_id, shape) in output_tensors:
if (type(t_id) != tuple):
t_id = tuple(t_id)
if (t_id not in self.tensors):
dtype = t_type[7:(- 1)]
self.tensors[t_id] = TensorNode(t_id, dtype)
self.tensors[t_id].add_source(id)
self.tensors[t_id].add_shape(shape)
for n in self.nodes.values():
if (n.id != 1):
if (n.parent_id in self.nodes):
self.nodes[n.parent_id].add_child(n)
n.set_parent(self.nodes[n.parent_id])
for n in self.nodes.values():
n.sort_children()
self.remove_dataloader_ops()
    @staticmethod
    def _read_attrs(node: Dict[(str, Any)]) -> Tuple:
attr_types = {'fw_parent': int, 'seq_id': int, 'fw_tid': int, 'op_schema': str, 'rf_id': int, 'scope': int, 'tid': int}
attr_dict = {attr['name']: attr_types[attr['name']](attr['value']) for attr in node['attrs'] if (attr['name'] in attr_types.keys())}
if (attr_dict.keys() != attr_types.keys()):
raise ValueError(('Not all keys in attr_dict have updated values. Node:' + str(node)))
return tuple((attr_dict[key] for key in attr_types.keys()))
    @staticmethod
    def _create_node_v1_0_1(pid, x: Dict[(str, Any)]) -> Node:
return Node(x['name'], x['id'], x['parent'], x['fw_parent'], x['seq_id'], pid, x['tid'], x['fw_tid'], x.get('op_schema', ''), x['scope'], x['inputs'], x['input_types'], x['input_shapes'], x['outputs'], x['output_types'], x['output_shapes'], x.get('rf_id', None))
    @staticmethod
    def _create_node_v1_0_2_chakra_0_0_4(pid, x: Dict[(str, Any)]) -> Node:
(fw_parent, seq_id, fw_tid, op_schema, rf_id, scope, tid) = ExecutionTrace._read_attrs(x)
return Node(x['name'], x['id'], x['ctrl_deps'], fw_parent, seq_id, pid, tid, fw_tid, op_schema, scope, x['inputs']['values'], x['inputs']['types'], x['inputs']['shapes'], x['outputs']['values'], x['outputs']['types'], x['outputs']['shapes'], rf_id)
def get_nodes(self, clean: bool=False):
if clean:
return self.clean_nodes
return self.nodes
def set_iterations(self, step_annotation=PROFILER_STEP_ANNOTATION) -> None:
self.iteration_ids = [1]
for id in sorted(self.nodes.keys()):
if (step_annotation in self.nodes[id].name):
self.iteration_ids.append(id)
self.iteration_ids = sorted(self.iteration_ids)
logging.info(f'Iteration node ids list = {self.iteration_ids}')
def iterations(self) -> Optional[int]:
if (len(self.iteration_ids) == 0):
return None
return (len(self.iteration_ids) - 1)
def get_unique_ops(self, detail: bool=False, clean: bool=False, json_format: bool=False):
def get_param(value, type, shape):
type = type.lower()
SCALAR_TYPES = {'int', 'long', 'float', 'double', 'bool'}
param = {'type': type}
if type.startswith('genericlist'):
param = {'type': 'genericlist'}
param['value'] = []
type_list = type[12:(- 1)].split(',')
param_list = zip(value, type_list, shape)
for (v, t, s) in param_list:
param['value'].append(get_param(v, t, s))
param['size'] = len(value)
elif ((type in SCALAR_TYPES) or (type == 'device')):
param['value'] = value
elif type.startswith('tensor'):
param['type'] = 'tensor'
param['dtype'] = type[7:(- 1)]
param['shape'] = shape
return param
def convert_inputs(inputs, types, shapes):
input_info = zip(inputs, types, shapes)
params = []
for (value, type, shape) in input_info:
params.append(get_param(value, type, shape))
return params
ops = {}
nodes_dict = (self.clean_nodes if clean else self.nodes)
for n in nodes_dict.values():
if n.is_op(detail):
if (n.name in ops):
ops[n.name]['count'] += 1
ops[n.name]['inputs'].append(convert_inputs(n.inputs, n.input_types, n.input_shapes))
else:
ops[n.name] = {'count': 1}
ops[n.name]['inputs'] = [convert_inputs(n.inputs, n.input_types, n.input_shapes)]
for attr in ops.values():
unique = {json.dumps(x, sort_keys=True) for x in attr['inputs']}
attr['inputs'] = list(map(json.loads, unique))
return ops
def print_op_stats(self, detail: bool=False, clean: bool=False, json_format: bool=False):
ops = self.get_unique_ops(detail, clean, json_format)
if json_format:
print(json.dumps(ops, indent=2, sort_keys=True))
else:
print('### OP STATS ###')
for (key, val) in sorted(ops.items()):
print(f'op: {key}')
print(f" count: {val['count']}")
print(' unique inputs:')
for i in val['inputs']:
print(f' input: {i}')
def gen_graphviz(self, file_name):
dot = pydot.Dot(graph_type='digraph')
for (id, n) in self.nodes.items():
dot.add_node(pydot.Node(id, label=f'{n.name} ({n.id})', shape='box', style='filled', fillcolor='#fffbed'))
for id in self.tensors:
dot.add_node(pydot.Node(id, label=f'T{id}', style='filled', fillcolor='#e8faff'))
nodes = (len(self.nodes) + len(self.tensors))
edges = 0
for (id, n) in self.nodes.items():
dot.add_edge(pydot.Edge(n.parent_id, id, arrowhead='odiamond'))
edges += 1
for (_, input, _) in n.get_input_tensors():
dot.add_edge(pydot.Edge(input, id))
edges += 1
for (_, output, _) in n.get_output_tensors():
dot.add_edge(pydot.Edge(id, output))
edges += 1
dot.write_svg(file_name, prog='dot')
logging.info(f'nodes: {nodes}')
logging.info(f'edges: {edges}')
def gen_graphml(self, file_name):
graphml = GraphML(self)
graphml.write('execution trace', file_name)
def gen_graph(self, file_name, type=None):
dot_max_nodes = 300
if (((len(self.nodes) < dot_max_nodes) or (type == 'graphviz')) and (type != 'graphml')):
out_name = f'{file_name}.svg'
self.gen_graphviz(out_name)
else:
out_name = f'{file_name}.graphml'
self.gen_graphml(out_name)
print(f'Execution trace written to {out_name}')
def print_tensors(self, detail: bool=False):
print('### TENSORS ###')
for (id, t) in self.tensors.items():
if detail:
print(f'ID {id}:')
print(' type:', t.dtype)
print(' shapes:', t.shapes)
print(' sources:', t.sources)
print(' sinks:', t.sinks)
else:
print(f'id = {id}:')
print(' type:', t.dtype)
print(' shapes:', t.shapes)
def _print_tree_preorder(self, n, indent, pid, tid, detail: bool):
if (n.type == NodeType.OPERATOR):
print(f'{indent}({n.parent_id}:{n.id}) {n.name}')
inputs = list(n.get_inputs())
print(f'{indent} arg: {inputs}')
outputs = list(n.get_outputs())
print(f'{indent} out: {outputs}')
if (not detail):
return
else:
print(f'{indent}({n.id}) {n.name}')
for c in n.children:
self._print_tree_preorder(c, (indent + ' '), pid, tid, detail)
def print_tree(self, detail: bool=False):
print('### Execution Tree ###')
for (pid, threads) in self.proc_group.items():
print(f'process: {pid}')
for tid in sorted(threads):
print(f' thread: {tid}')
thread_node = self.nodes[threads[tid]]
self._print_tree_preorder(thread_node, ' ', pid, tid, detail)
def node_depend(self, id: int):
n = self.nodes[id]
print(f'ID {id}: Operator')
print(' name:', n.name)
print(' id:', n.id)
print(' rf_id:', n.rf_id)
print(' tid:', n.tid)
print(' parent_id:', n.parent_id)
print(' fw_tid:', n.fw_tid)
print(' type:', n.type)
print(' op_schema:', n.op_schema)
print(' fw_parent_id:', n.fw_parent_id)
print(' scope:', n.scope)
print(' children:', [child.id for child in n.children])
print(' inputs:')
for (dtype, tensor_id, shape) in n.get_input_tensors():
prev_id = 0
for s in self.tensors[tensor_id].sources:
if ((s < id) and (s > prev_id)):
prev_id = s
if (prev_id not in self.nodes):
print(f'Missing source node for {prev_id}')
elif prev_id:
print(f"{(' ' * 16)}{tensor_id}: {dtype} {shape} <-- {prev_id} ({self.nodes[prev_id].name})")
else:
print(f"{(' ' * 16)}{tensor_id}: {dtype} {shape}")
print(' outputs:')
for (dtype, tensor_id, shape) in n.get_output_tensors():
next_id = sys.maxsize
for s in self.tensors[tensor_id].sinks:
if ((s > id) and (s < next_id)):
next_id = s
if (next_id != sys.maxsize):
print(f"{(' ' * 16)}{tensor_id}: {dtype} {shape} --> {next_id} ({self.nodes[next_id].name})")
else:
print(f"{(' ' * 16)}{tensor_id}: {dtype} {shape}")
def tensor_depend(self, id: int):
t = self.tensors[id]
print(f'ID {id}: Tensor')
print(' type:', t.dtype)
print(' shapes:', t.shapes)
sources = {}
for node_id in t.sources:
sources[node_id] = self.nodes[node_id].name
print(' sources:', sources)
sinks = {}
for node_id in t.sinks:
sinks[node_id] = self.nodes[node_id].name
print(' sinks:', sinks)
def remove_dataloader_ops(self):
def check_parent(node):
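            # A node counts as a DataLoader op if it, or any ancestor on the
            # parent chain, has 'DataLoader' in its name.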
tmp = node
while (tmp and (tmp.id != tmp.parent_id)):
if ('DataLoader' in tmp.name):
return True
tmp = tmp.parent
return False
if (len(self.clean_nodes.keys()) == 0):
for (id, node) in self.nodes.items():
if (not check_parent(node)):
self.clean_nodes[id] = node
    def clone_one_iteration(self, n) -> 'ExecutionTrace':
assert (n >= 0), 'Iteration too low'
assert (n < len(self.iteration_ids)), 'Iteration too high'
(start_id, end_id) = (self.iteration_ids[n], self.iteration_ids[(n + 1)])
logging.info(f'Copying nodes for iter {n} for ids in the range [{start_id}, {end_id})')
clone = copy.deepcopy(self)
trimmed_nodes = filter((lambda p: (((p[1].id >= start_id) and (p[1].id < end_id)) or (p[1].parent_id == 1))), clone.nodes.items())
clone.nodes = dict(trimmed_nodes)
node_id_set = clone.nodes.keys()
logging.debug(f'filtered node ID set = {node_id_set}')
thread_nodes = {node.tid: node for node in clone.nodes.values() if ((node.parent_id == 1) and (EXECUTION_TRACE_THREAD_ANNOTATION in node.name))}
assert (len(thread_nodes) > 0)
for node in clone.nodes.values():
if ((node.parent is not None) and (node.parent_id != 1) and (node.parent_id not in node_id_set)):
logging.info(f'Fixing parent for node id = {node.id}, parent = {node.parent_id}')
thread_parent = thread_nodes[node.tid]
node.parent_id = thread_parent.id
node.set_parent(thread_parent)
thread_parent.add_child(node)
for node in clone.nodes.values():
children = [child for child in node.children if (child.id in node_id_set)]
node.children = children
clone.clean_nodes = {}
clone.remove_dataloader_ops()
logging.info(f'Nodes trimmed ET = {len(clone.get_nodes())}')
return clone |
class OptionPlotoptionsBulletSonificationDefaultspeechoptions(Options):
    @property
    def activeWhen(self) -> 'OptionPlotoptionsBulletSonificationDefaultspeechoptionsActivewhen':
        return self._config_sub_data('activeWhen', OptionPlotoptionsBulletSonificationDefaultspeechoptionsActivewhen)
    @property
    def language(self):
        return self._config_get('en-US')
    @language.setter
    def language(self, text: str):
        self._config(text, js_type=False)
    @property
    def mapping(self) -> 'OptionPlotoptionsBulletSonificationDefaultspeechoptionsMapping':
        return self._config_sub_data('mapping', OptionPlotoptionsBulletSonificationDefaultspeechoptionsMapping)
    @property
    def pointGrouping(self) -> 'OptionPlotoptionsBulletSonificationDefaultspeechoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionPlotoptionsBulletSonificationDefaultspeechoptionsPointgrouping)
    @property
    def preferredVoice(self):
        return self._config_get(None)
    @preferredVoice.setter
    def preferredVoice(self, text: str):
        self._config(text, js_type=False)
    @property
    def showPlayMarker(self):
        return self._config_get(True)
    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def type(self):
        return self._config_get('speech')
    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False) |
class nd_option_la(nd_option):
_PACK_STR = '!BB6s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {'ascii': ['hw_src']}
    def __init__(self, length=0, hw_src=None, data=None):
super(nd_option_la, self).__init__(self.option_type(), length)
self.hw_src = hw_src
self.data = data
    @classmethod
    def parser(cls, buf, offset):
(_, length, hw_src) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(length, addrconv.mac.bin_to_text(hw_src))
offset += cls._MIN_LEN
if (len(buf) > offset):
msg.data = buf[offset:]
return msg
def serialize(self):
buf = bytearray(struct.pack(self._PACK_STR, self.option_type(), self.length, addrconv.mac.text_to_bin(self.hw_src)))
if (self.data is not None):
buf.extend(self.data)
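        # ND options are sized in units of 8 octets (RFC 4861): zero-pad the
        # buffer and, if the length field was left at 0, derive it from the size.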
mod = (len(buf) % 8)
if mod:
buf.extend(bytearray((8 - mod)))
if (0 == self.length):
self.length = (len(buf) // 8)
struct.pack_into('!B', buf, 1, self.length)
return six.binary_type(buf)
def __len__(self):
length = self._MIN_LEN
if (self.data is not None):
length += len(self.data)
return length |
def execute_code(message: Message, env: Environment) -> Evm:
code = message.code
valid_jump_destinations = get_valid_jump_destinations(code)
evm = Evm(pc=Uint(0), stack=[], memory=bytearray(), code=code, gas_left=message.gas, env=env, valid_jump_destinations=valid_jump_destinations, logs=(), refund_counter=U256(0), running=True, message=message, output=b'', accounts_to_delete=set(), error=None)
try:
if (evm.message.code_address in PRE_COMPILED_CONTRACTS):
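            # A precompiled contract: dispatch to the native implementation
            # instead of interpreting bytecode.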
evm_trace(evm, PrecompileStart(evm.message.code_address))
PRE_COMPILED_CONTRACTS[evm.message.code_address](evm)
evm_trace(evm, PrecompileEnd())
return evm
while (evm.running and (evm.pc < len(evm.code))):
try:
op = Ops(evm.code[evm.pc])
except ValueError:
raise InvalidOpcode(evm.code[evm.pc])
evm_trace(evm, OpStart(op))
op_implementation[op](evm)
evm_trace(evm, OpEnd())
evm_trace(evm, EvmStop(Ops.STOP))
except ExceptionalHalt as error:
evm_trace(evm, OpException(error))
evm.gas_left = Uint(0)
evm.error = error
return evm |
class ethereumNameIdentifier(Module):
config = Config({Option('ADDRESS', 'Provide your target address or ENS', True): str('0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045')})
def run(self):
TABLE_DATA = []
address = self.config.option('ADDRESS').value
        url = ('' + address)  # NOTE: the resolver API base URL is elided in the source
response = requests.get(url)
r = json.loads(response.content)
infos = ('ADDRESS', r['address'])
TABLE_DATA.append(infos)
infos = ('NAME', r['name'])
TABLE_DATA.append(infos)
infos = ('AVATAR', r['avatar'])
TABLE_DATA.append(infos)
table = SingleTable(TABLE_DATA, 'ETH')
print(('\n' + table.table)) |
class CRUDDictData(CRUDBase[(DictData, CreateDictData, UpdateDictData)]):
async def get(self, db: AsyncSession, pk: int) -> (DictData | None):
return (await self.get_(db, pk=pk))
async def get_all(self, label: str=None, value: str=None, status: int=None) -> Select:
se = select(self.model).options(selectinload(self.model.type)).order_by(desc(self.model.sort))
where_list = []
if label:
where_list.append(self.model.label.like(f'%{label}%'))
if value:
where_list.append(self.model.value.like(f'%{value}%'))
if (status is not None):
where_list.append((self.model.status == status))
if where_list:
se = se.where(and_(*where_list))
return se
async def get_by_label(self, db: AsyncSession, label: str) -> (DictData | None):
api = (await db.execute(select(self.model).where((self.model.label == label))))
return api.scalars().first()
async def create(self, db: AsyncSession, obj_in: CreateDictData) -> None:
(await self.create_(db, obj_in))
async def update(self, db: AsyncSession, pk: int, obj_in: UpdateDictData) -> int:
return (await self.update_(db, pk, obj_in))
async def delete(self, db: AsyncSession, pk: list[int]) -> int:
apis = (await db.execute(delete(self.model).where(self.model.id.in_(pk))))
return apis.rowcount
async def get_with_relation(self, db: AsyncSession, pk: int) -> (DictData | None):
where = [(self.model.id == pk)]
dict_data = (await db.execute(select(self.model).options(selectinload(self.model.type)).where(*where)))
return dict_data.scalars().first() |
def destroy_s3_event(app, env, region):
generated = get_details(app=app, env=env)
bucket = generated.s3_app_bucket()
session = boto3.Session(profile_name=env, region_name=region)
s3_client = session.client('s3')
config = {}
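    # Putting an empty NotificationConfiguration clears every event notification
    # currently attached to the bucket.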
s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=config)
LOG.debug('Deleted Lambda S3 notification')
return True |
def serialize(obj, name=None, result=None):
if (result is None):
result = {}
def make_name(obj, name=None):
objname = obj.__class__.__name__
if (name is None):
return '({})'.format(objname)
return '{} ({})'.format(name, objname)
name = make_name(obj, name)
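    # Objects that expose __dict__ are recursed attribute-by-attribute; builtin
    # containers and scalars are handled in the AttributeError branch below.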
try:
obj_attr = obj.__dict__
except AttributeError:
if isinstance(obj, (tuple, list)):
newlist = []
for element in obj:
newlist.append(serialize(element))
result[name] = newlist
elif isinstance(obj, (dict,)):
newdict = {}
for (key, element) in obj.items():
newdict[key] = serialize(element)
result[name] = newdict
else:
result[name] = obj
else:
attrs = {}
result[name] = attrs
for (attr_name, attr_value) in obj_attr.items():
serialize(attr_value, attr_name, attrs)
return result |
def test__RfqLimitOrder_swap(trace_classifier: TraceClassifier):
transaction_hash = '0x4f66832e654f8a4d773ddfd6bb56626db29b90'
block_number =
swap = Swap(abi_name='INativeOrdersFeature', transaction_hash=transaction_hash, transaction_position=168, block_number=block_number, trace_address=[1, 0, 1, 0, 1], contract_address='0xdef1c0ded9bec7f1af027b25eff', from_address='0xdef1c0ded9bec7f1af027b25eff', to_address='0xdef1c0ded9bec7f1af027b25eff', token_out_address='0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2', token_out_amount=, token_in_address='0x95ad61b0a150d79219dcf64e1e6cc01f0b64c4ce', token_in_amount=, protocol=Protocol.zero_ex, error=None)
block = load_test_block(block_number)
classified_traces = trace_classifier.classify(block.traces)
result = get_swaps(classified_traces)
assert (result.count(swap) == 1) |
def execute_exp(config: Config) -> None:
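    # Build a per-process random seed from the PID, the wall clock, and OS entropy.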
seed = ((os.getpid() + int(datetime.now().strftime('%S%f'))) + int.from_bytes(os.urandom(2), 'big'))
print('Using a generated random seed {}'.format(seed))
config.defrost()
if (config.RUN_TYPE == 'eval'):
config.TASK_CONFIG.TASK.ANGLE_SUCCESS.USE_TRAIN_SUCCESS = False
config.TASK_CONFIG.TASK.IMAGEGOAL_ROTATION_SENSOR.SAMPLE_ANGLE = False
config.TASK_CONFIG.SEED = seed
config.freeze()
random.seed(config.TASK_CONFIG.SEED)
np.random.seed(config.TASK_CONFIG.SEED)
torch.manual_seed(config.TASK_CONFIG.SEED)
if (config.FORCE_TORCH_SINGLE_THREADED and torch.cuda.is_available()):
torch.set_num_threads(1)
setup_experiment(config)
trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
assert (trainer_init is not None), f'{config.TRAINER_NAME} is not supported'
trainer = trainer_init(config)
if (config.RUN_TYPE == 'train'):
trainer.train()
elif (config.RUN_TYPE == 'eval'):
trainer.eval() |
class TargetingDynamicRule(AbstractObject):
def __init__(self, api=None):
super(TargetingDynamicRule, self).__init__()
self._isTargetingDynamicRule = True
self._api = api
class Field(AbstractObject.Field):
field_action_type = 'action.type'
ad_group_id = 'ad_group_id'
campaign_group_id = 'campaign_group_id'
campaign_id = 'campaign_id'
impression_count = 'impression_count'
page_id = 'page_id'
post = 'post'
retention_seconds = 'retention_seconds'
_field_types = {'action.type': 'string', 'ad_group_id': 'string', 'campaign_group_id': 'string', 'campaign_id': 'string', 'impression_count': 'string', 'page_id': 'string', 'post': 'string', 'retention_seconds': 'string'}
    @classmethod
    def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info |
def TimerTab(timer, accent_color, text_color, background_color, is_shut_down: bool, is_hibernate: bool, is_sleep: bool):
do_nothing = (not (is_shut_down or is_hibernate or is_sleep))
if (time.time() < timer):
timer_date = datetime.fromtimestamp(timer)
timer_date = timer_date.strftime('%#I:%M %p')
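        # '%#I' is the Windows-specific strftime flag that drops the hour's leading zero.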
timer_text = t('Timer set for $TIME').replace('$TIME', timer_date)
else:
timer_text = t('No Timer Set')
cancel_button = StyledButton(t('Cancel Timer'), accent_color, background_color, key='cancel_timer', visible=(timer != 0))
defaults = {'text_color': text_color, 'background_color': background_color, 'font': FONT_NORMAL, 'enable_events': True}
layout = [[Sg.Radio(t('Shut down when timer runs out'), 'TIMER', default=is_shut_down, key='shut_down', **defaults)], [Sg.Radio(t('Sleep when timer runs out'), 'TIMER', default=is_sleep, key='sleep', **defaults)], [Sg.Radio(t('Hibernate when timer runs out'), 'TIMER', default=is_hibernate, key='hibernate', **defaults)], [Sg.Radio(t('Only Stop Playback').capitalize(), 'TIMER', default=do_nothing, key='timer_stop', **defaults)], [Sg.Text(t('Enter minutes or HH:MM'), font=FONT_NORMAL), Sg.Input(key='timer_input', size=(11, 1), border_width=1), StyledButton(t('Submit'), accent_color, background_color, key='timer_submit')], [Sg.Text(t('Invalid Input (enter minutes or HH:MM)'), font=FONT_NORMAL, visible=False, key='timer_error')], [Sg.Text(timer_text, font=FONT_NORMAL, key='timer_text', size=(20, 1), metadata=(timer != 0)), cancel_button]]
return Sg.Tab(t('Timer'), [[Sg.Column(layout, pad=(0, (50, 0)), justification='center')]], key='tab_timer') |
@RegisterWithArgChecks(name='vrf.get', req_args=[ROUTE_DISTINGUISHER], opt_args=[VRF_RF])
def get_vrf(route_dist, route_family=VRF_RF_IPV4):
vrf_conf = CORE_MANAGER.vrfs_conf.get_vrf_conf(route_dist, vrf_rf=route_family)
if (not vrf_conf):
raise RuntimeConfigError(desc=('No VrfConf with vpn id %s' % route_dist))
return vrf_conf.settings |
def get_data(num_examples: int, num_fl_users: int, examples_per_user: int, fl_batch_size: int, nonfl_batch_size: int, model: IFLModel) -> Tuple[(IFLDataProvider, torch.utils.data.DataLoader)]:
fl_data_provider = get_fl_data_provider(num_examples=num_examples, num_fl_users=num_fl_users, examples_per_user=examples_per_user, batch_size=fl_batch_size, model=model)
dummy_dataset = DummyAlphabetDataset(num_examples)
nonfl_data_loader = torch.utils.data.DataLoader(dummy_dataset, batch_size=nonfl_batch_size, shuffle=False)
return (fl_data_provider, nonfl_data_loader) |
class OptionSeriesPyramidSonificationContexttracksMappingLowpassFrequency(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
def node_show_races(caller, raw_string, **kwargs):
text = ' Select a |cRace|n.\n\n Select one by number below to view its details, or |whelp|n\n at any time for more info.\n '
options = []
for race in _SORTED_RACES:
options.append({'desc': '|c{}|n'.format(race.name), 'goto': ('node_select_race', {'race': race, **kwargs})})
return ((text, 'Select a race to show its details'), options) |
class TraceCallGraphTestCase(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
test_data_path = Path(__file__).parent.parent.joinpath('tests/data/call_stack/backward_thread.json')
self.test_trace_backward_threads: str = str(test_data_path)
self.t_backward_threads: Trace = Trace(trace_files={0: self.test_trace_backward_threads}, trace_dir='')
self.t_backward_threads.parse_traces()
self.t_backward_threads.decode_symbol_ids(use_shorten_name=False)
self.cg_backward_threads: CallGraph = CallGraph(self.t_backward_threads, ranks=[0])
self.df_backward_threads: pd.DataFrame = self.cg_backward_threads.trace_data.get_trace(0)
    @staticmethod
    def _get_first_index(df: pd.DataFrame, s_name: str) -> int:
indices = df.loc[df['s_name'].eq(s_name)].index.to_list()
return (indices[0] if (len(indices) > 0) else (- 1))
def test_call_graph_attributes(self) -> None:
cg: CallGraph = self.cg_backward_threads
df: pd.DataFrame = self.df_backward_threads
self.assertEqual(len(cg.trace_data.get_all_traces()), 1)
self.assertEqual(df.shape[0], 7)
self.assertListEqual(list(cg.rank_to_stacks.keys()), [0])
self.assertListEqual(list(cg.rank_to_nodes.keys()), [0])
def test_call_graph_using_kernel_node(self) -> None:
cg: CallGraph = self.cg_backward_threads
df: pd.DataFrame = self.df_backward_threads
kernel_indices = df.loc[df['s_name'].str.contains('cutlass::Kernel')]['index']
self.assertTrue((kernel_indices.shape[0] >= 1))
kernel_index = kernel_indices.tolist()[0]
kernel_node = df.loc[kernel_index]
self.assertEqual(kernel_node['height'], 0)
self.assertEqual(kernel_node['depth'], 6)
stack = cg.get_stack_of_node(kernel_index)
self.assertListEqual(stack['s_name'].tolist(), ['ProfilerStep#552', '## backward ##', 'autograd::engine::evaluate_function: AddmmBackward0', 'AddmmBackward0', 'aten::mm', 'cudaLaunchKernel', 'void cutlass::Kernel<cutlass_80_tensorop_s1688gemm_128x128_32x3_nn_align4>'])
self.assertEqual(stack['depth'].tolist(), [0, 1, 2, 3, 4, 5, 6])
self.assertEqual(stack['height'].tolist(), [6, 5, 4, 3, 2, 1, 0])
self.assertEqual(stack['num_kernels'].tolist(), ([1] * 7))
def test_link_main_and_bwd_stacks_has_bwd_annotation(self) -> None:
cg: CallGraph = self.cg_backward_threads
self.assertEqual(len(cg.call_stacks), 2)
self.assertTupleEqual(cg.mapping.shape, (2, 7))
self.assertListEqual(['bwd', 'main'], sorted(cg.mapping['label'].to_list()))
main_stack_root = sorted(cg.mapping.loc[(cg.mapping['label'].eq('main'), 'stack_root')].to_list())
bwd_stack_root = sorted(cg.mapping.loc[(cg.mapping['label'].eq('bwd'), 'stack_root')].to_list())
self.assertListEqual(main_stack_root, bwd_stack_root)
def test_link_main_and_bwd_stacks_no_bwd_annotation(self) -> None:
t: Trace = self.t_backward_threads
for (_, df) in t.get_all_traces().items():
df.drop(df.loc[df['s_name'].eq('## backward ##')].index, inplace=True)
cg: CallGraph = CallGraph(t)
autograd_index = self._get_first_index(t.get_trace(0), 'autograd::engine::evaluate_function: AddmmBackward0')
profiler_step_index = self._get_first_index(t.get_trace(0), 'ProfilerStep#552')
self.assertTrue((autograd_index > (- 1)))
self.assertTrue((profiler_step_index > (- 1)))
csg: CallStackGraph = cg.get_csg_of_node(autograd_index, 0)
self.assertEqual(csg.get_parent(autograd_index), profiler_step_index)
self.assertIn(autograd_index, csg.get_children(profiler_step_index))
def test_skip_gpu_threads(self) -> None:
trace_file = self.test_trace_backward_threads
t: Trace = Trace(trace_files={i: trace_file for i in range(4)})
t.parse_traces()
for (rank, df) in t.get_all_traces().items():
df['pid'] = (rank + 1)
cg: CallGraph = CallGraph(t)
self.assertListEqual(cg.mapping.groupby('rank').size().unique().tolist(), [2])
def test_get_call_stacks(self) -> None:
cg: CallGraph = self.cg_backward_threads
count: int = 0
for csg in cg.get_call_stacks():
count += 1
self.assertEqual(csg.identity.rank, 0)
self.assertEqual(count, 2)
count = 0
for csg in cg.get_call_stacks(rank=0, pid=3914, tid=24922):
count += 1
self.assertEqual(csg.identity, CallStackIdentity(rank=0, pid=3914, tid=24922))
self.assertEqual(count, 1)
for csg in cg.get_call_stacks(stack_index=1):
self.assertEqual(csg.identity, CallStackIdentity(rank=0, pid=3914, tid=24922))
def test_get_csg_of_node(self) -> None:
cg: CallGraph = self.cg_backward_threads
csg = cg.get_csg_of_node(5)
self.assertEqual(csg.identity, CallStackIdentity(rank=0, pid=3914, tid=24922))
def test_get_stack_of_node(self) -> None:
cg: CallGraph = self.cg_backward_threads
stack = cg.get_stack_of_node(5, skip_ancestors=True)
self.assertEqual(len(stack), 1)
self.assertListEqual(stack['parent'].tolist(), [6])
stack = cg.get_stack_of_node(5, skip_ancestors=False)
self.assertEqual(len(stack), 7)
self.assertListEqual(stack['parent'].tolist(), [(- 3914), 0, 1, 2, 3, 4, 6])
stack = cg.get_stack_of_node(2, skip_ancestors=True)
self.assertEqual(len(stack), 5)
self.assertListEqual(stack['parent'].tolist(), [1, 2, 3, 4, 6])
stack = cg.get_stack_of_node(2, skip_ancestors=False)
self.assertEqual(len(stack), 7)
self.assertListEqual(stack['parent'].tolist(), [(- 3914), 0, 1, 2, 3, 4, 6])
def test_call_graph_from_dataframe(self) -> None:
df = self.df_backward_threads
symbol_table = self.t_backward_threads.symbol_table
cg = CallGraph.from_dataframe(df, symbol_table)
cs = cg.get_csg_of_node(1)
node_parent_pairs = [(idx, cs.nodes[idx].parent) for idx in sorted(cs.nodes.keys())]
expected_node_pairs = [((- 3914), (- 1)), (0, (- 3914)), (1, 0), (2, 1), (3, 2), (4, 3), (5, 6), (6, 4)]
self.assertEqual(cg.mapping.shape[0], 2)
self.assertListEqual(node_parent_pairs, expected_node_pairs) |
@pytest.mark.asyncio
class TestAEAHelperTCPSocketChannel():
    @pytest.mark.asyncio
async def test_connection_communication(self):
pipe = TCPSocketChannel()
assert ((pipe.in_path is not None) and (pipe.out_path is not None)), 'TCPSocketChannel not properly setup'
connected = asyncio.ensure_future(pipe.connect())
client_pipe = TCPSocketChannelClient(pipe.out_path, pipe.in_path)
client = Thread(target=_run_echo_service, args=[client_pipe])
client.start()
try:
assert (await connected), 'Failed to connect pipe'
message = b'hello'
(await pipe.write(message))
received = (await pipe.read())
assert (received == message), 'Echoed message differs'
except Exception:
raise
finally:
(await pipe.close())
client.join()
    @pytest.mark.asyncio
async def test_connection_refused(self):
pipe = TCPSocketChannel()
assert ((pipe.in_path is not None) and (pipe.out_path is not None)), 'TCPSocketChannel not properly setup'
client_pipe = TCPSocketChannelClient(pipe.out_path, pipe.in_path)
connected = (await client_pipe.connect())
assert (connected is False) |
class HPPrinterEntity(Entity):
hass: HomeAssistant = None
integration_name: str = None
entity: EntityData = None
remove_dispatcher = None
current_domain: str = None
ha = None
entity_manager = None
device_manager = None
def initialize(self, hass: HomeAssistant, integration_name: str, entity: EntityData, current_domain: str):
self.hass = hass
self.integration_name = integration_name
self.entity = entity
self.remove_dispatcher = None
self.current_domain = current_domain
self.ha = get_ha(self.hass, self.integration_name)
if (self.ha is None):
_LOGGER.error(f'HPPrinterHomeAssistant was not found for {self.integration_name}')
else:
self.entity_manager = self.ha.entity_manager
self.device_manager = self.ha.device_manager
    @property
    def unique_id(self) -> Optional[str]:
        return self.entity.unique_id
    @property
    def device_info(self):
        return self.device_manager.get(self.entity.device_name)
    @property
    def name(self):
        return self.entity.name
    @property
    def icon(self):
        return self.entity.icon
    @property
    def should_poll(self):
        return False
    @property
    def extra_state_attributes(self):
        return self.entity.attributes
async def async_added_to_hass(self):
async_dispatcher_connect(self.hass, SIGNALS[self.current_domain], self._schedule_immediate_update)
(await self.async_added_to_hass_local())
async def async_will_remove_from_hass(self) -> None:
if (self.remove_dispatcher is not None):
self.remove_dispatcher()
self.remove_dispatcher = None
(await self.async_will_remove_from_hass_local())
def _schedule_immediate_update(self):
self.hass.async_create_task(self._async_schedule_immediate_update())
async def _async_schedule_immediate_update(self):
if (self.entity_manager is None):
_LOGGER.debug(f'Cannot update {self.current_domain} - Entity Manager is None | {self.name}')
elif (self.entity is not None):
previous_state = self.entity.state
entity = self.entity_manager.get_entity(self.current_domain, self.name)
if entity.disabled:
_LOGGER.debug(f'Skip updating {self.name}, Entity is disabled')
else:
self.entity = entity
if (self.entity is not None):
self._immediate_update(previous_state)
async def async_added_to_hass_local(self):
pass
async def async_will_remove_from_hass_local(self):
pass
def _immediate_update(self, previous_state: int):
self.async_schedule_update_ha_state(True) |
@pytest.mark.parametrize('arguments,expected', (({}, [EVENT_1_TOPIC]), ({'arg0': 1}, [EVENT_1_TOPIC]), ({'arg0': 1, 'arg3': [1, 2]}, [EVENT_1_TOPIC]), ({'arg1': 1}, [EVENT_1_TOPIC, hex_and_pad(1)]), ({'arg1': [1, 2]}, [EVENT_1_TOPIC, [hex_and_pad(1), hex_and_pad(2)]]), ({'arg1': [1], 'arg2': [2]}, [EVENT_1_TOPIC, hex_and_pad(1), hex_and_pad(2)]), ({'arg1': [1, 3], 'arg2': [2, 4]}, [EVENT_1_TOPIC, [hex_and_pad(1), hex_and_pad(3)], [hex_and_pad(2), hex_and_pad(4)]])))
def test_construct_event_topics(w3, arguments, expected):
actual = construct_event_topic_set(EVENT_1_ABI, w3.codec, arguments)
assert (actual == expected) |
class OptionPlotoptionsPolygon(Options):
    @property
    def accessibility(self) -> 'OptionPlotoptionsPolygonAccessibility':
        return self._config_sub_data('accessibility', OptionPlotoptionsPolygonAccessibility)
    @property
    def allowPointSelect(self):
        return self._config_get(False)
    @allowPointSelect.setter
    def allowPointSelect(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def animation(self):
        return self._config_get(True)
    @animation.setter
    def animation(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def animationLimit(self):
        return self._config_get(None)
    @animationLimit.setter
    def animationLimit(self, num: float):
        self._config(num, js_type=False)
    @property
    def className(self):
        return self._config_get(None)
    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)
    @property
    def clip(self):
        return self._config_get(True)
    @clip.setter
    def clip(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def color(self):
        return self._config_get(None)
    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)
    @property
    def colorAxis(self):
        return self._config_get(0)
    @colorAxis.setter
    def colorAxis(self, num: float):
        self._config(num, js_type=False)
    @property
    def colorIndex(self):
        return self._config_get(None)
    @colorIndex.setter
    def colorIndex(self, num: float):
        self._config(num, js_type=False)
    @property
    def colorKey(self):
        return self._config_get('y')
    @colorKey.setter
    def colorKey(self, text: str):
        self._config(text, js_type=False)
    @property
    def connectEnds(self):
        return self._config_get(None)
    @connectEnds.setter
    def connectEnds(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def connectNulls(self):
        return self._config_get(False)
    @connectNulls.setter
    def connectNulls(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def crisp(self):
        return self._config_get(True)
    @crisp.setter
    def crisp(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def cursor(self):
        return self._config_get(None)
    @cursor.setter
    def cursor(self, text: str):
        self._config(text, js_type=False)
    @property
    def custom(self):
        return self._config_get(None)
    @custom.setter
    def custom(self, value: Any):
        self._config(value, js_type=False)
    @property
    def dashStyle(self):
        return self._config_get('Solid')
    @dashStyle.setter
    def dashStyle(self, text: str):
        self._config(text, js_type=False)
    @property
    def dataLabels(self) -> 'OptionPlotoptionsPolygonDatalabels':
        return self._config_sub_data('dataLabels', OptionPlotoptionsPolygonDatalabels)
    @property
    def dataSorting(self) -> 'OptionPlotoptionsPolygonDatasorting':
        return self._config_sub_data('dataSorting', OptionPlotoptionsPolygonDatasorting)
    @property
    def description(self):
        return self._config_get(None)
    @description.setter
    def description(self, text: str):
        self._config(text, js_type=False)
    @property
    def dragDrop(self) -> 'OptionPlotoptionsPolygonDragdrop':
        return self._config_sub_data('dragDrop', OptionPlotoptionsPolygonDragdrop)
    @property
    def enableMouseTracking(self):
        return self._config_get(True)
    @enableMouseTracking.setter
    def enableMouseTracking(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def events(self) -> 'OptionPlotoptionsPolygonEvents':
        return self._config_sub_data('events', OptionPlotoptionsPolygonEvents)
    @property
    def findNearestPointBy(self):
        return self._config_get('xy')
    @findNearestPointBy.setter
    def findNearestPointBy(self, text: str):
        self._config(text, js_type=False)
    @property
    def getExtremesFromAll(self):
        return self._config_get(False)
    @getExtremesFromAll.setter
    def getExtremesFromAll(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def inactiveOtherPoints(self):
        return self._config_get(False)
    @inactiveOtherPoints.setter
    def inactiveOtherPoints(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def includeInDataExport(self):
        return self._config_get(None)
    @includeInDataExport.setter
    def includeInDataExport(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def keys(self):
        return self._config_get(None)
    @keys.setter
    def keys(self, value: Any):
        self._config(value, js_type=False)
    @property
    def label(self) -> 'OptionPlotoptionsPolygonLabel':
        return self._config_sub_data('label', OptionPlotoptionsPolygonLabel)
    @property
    def legendSymbol(self):
        return self._config_get('rectangle')
    @legendSymbol.setter
    def legendSymbol(self, text: str):
        self._config(text, js_type=False)
    @property
    def linecap(self):
        return self._config_get('round')  # default is the string 'round' (was the bare builtin `round`)
    @linecap.setter
    def linecap(self, value: Any):
        self._config(value, js_type=False)
    @property
    def lineWidth(self):
        return self._config_get(0)
    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    @property
    def linkedTo(self):
        return self._config_get(None)
    @linkedTo.setter
    def linkedTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def marker(self) -> 'OptionPlotoptionsPolygonMarker':
        return self._config_sub_data('marker', OptionPlotoptionsPolygonMarker)
    @property
    def negativeColor(self):
        return self._config_get(None)
    @negativeColor.setter
    def negativeColor(self, text: str):
        self._config(text, js_type=False)
    @property
    def onPoint(self) -> 'OptionPlotoptionsPolygonOnpoint':
        return self._config_sub_data('onPoint', OptionPlotoptionsPolygonOnpoint)
    @property
    def opacity(self):
        return self._config_get(1)
    @opacity.setter
    def opacity(self, num: float):
        self._config(num, js_type=False)
    @property
    def point(self) -> 'OptionPlotoptionsPolygonPoint':
        return self._config_sub_data('point', OptionPlotoptionsPolygonPoint)
    @property
    def pointDescriptionFormat(self):
        return self._config_get(None)
    @pointDescriptionFormat.setter
    def pointDescriptionFormat(self, value: Any):
        self._config(value, js_type=False)
    @property
    def pointDescriptionFormatter(self):
        return self._config_get(None)
    @pointDescriptionFormatter.setter
    def pointDescriptionFormatter(self, value: Any):
        self._config(value, js_type=False)
    @property
    def pointInterval(self):
        return self._config_get(1)
    @pointInterval.setter
    def pointInterval(self, num: float):
        self._config(num, js_type=False)
    @property
    def pointIntervalUnit(self):
        return self._config_get(None)
    @pointIntervalUnit.setter
    def pointIntervalUnit(self, value: Any):
        self._config(value, js_type=False)
    @property
    def pointStart(self):
        return self._config_get(0)
    @pointStart.setter
    def pointStart(self, num: float):
        self._config(num, js_type=False)
    @property
    def relativeXValue(self):
        return self._config_get(False)
    @relativeXValue.setter
    def relativeXValue(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def selected(self):
        return self._config_get(False)
    @selected.setter
    def selected(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def showCheckbox(self):
        return self._config_get(False)
    @showCheckbox.setter
    def showCheckbox(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def showInLegend(self):
        return self._config_get(None)
    @showInLegend.setter
    def showInLegend(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def skipKeyboardNavigation(self):
        return self._config_get(None)
    @skipKeyboardNavigation.setter
    def skipKeyboardNavigation(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def sonification(self) -> 'OptionPlotoptionsPolygonSonification':
        return self._config_sub_data('sonification', OptionPlotoptionsPolygonSonification)
    @property
    def stacking(self):
        return self._config_get(None)
    @stacking.setter
    def stacking(self, text: str):
        self._config(text, js_type=False)
    @property
    def states(self) -> 'OptionPlotoptionsPolygonStates':
        return self._config_sub_data('states', OptionPlotoptionsPolygonStates)
    @property
    def step(self):
        return self._config_get(None)
    @step.setter
    def step(self, value: Any):
        self._config(value, js_type=False)
    @property
    def stickyTracking(self):
        return self._config_get(False)
    @stickyTracking.setter
    def stickyTracking(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def tooltip(self) -> 'OptionPlotoptionsPolygonTooltip':
        return self._config_sub_data('tooltip', OptionPlotoptionsPolygonTooltip)
    @property
    def trackByArea(self):
        return self._config_get(True)
    @trackByArea.setter
    def trackByArea(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def turboThreshold(self):
        return self._config_get(1000)
    @turboThreshold.setter
    def turboThreshold(self, num: float):
        self._config(num, js_type=False)
    @property
    def visible(self):
        return self._config_get(True)
    @visible.setter
    def visible(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def zoneAxis(self):
        return self._config_get('y')
    @zoneAxis.setter
    def zoneAxis(self, text: str):
        self._config(text, js_type=False)
    @property
    def zones(self) -> 'OptionPlotoptionsPolygonZones':
        return self._config_sub_data('zones', OptionPlotoptionsPolygonZones) |
class TestHanoi(unittest.TestCase):
def test_hanoi(self):
hanoi = Hanoi()
num_disks = 3
src = Stack()
buff = Stack()
dest = Stack()
print('Test: None towers')
self.assertRaises(TypeError, hanoi.move_disks, num_disks, None, None, None)
print('Test: 0 disks')
hanoi.move_disks(num_disks, src, dest, buff)
self.assertEqual(dest.pop(), None)
print('Test: 1 disk')
src.push(5)
hanoi.move_disks(num_disks, src, dest, buff)
self.assertEqual(dest.pop(), 5)
print('Test: 2 or more disks')
for disk_index in range(num_disks, (- 1), (- 1)):
src.push(disk_index)
hanoi.move_disks(num_disks, src, dest, buff)
for disk_index in range(0, num_disks):
self.assertEqual(dest.pop(), disk_index)
print('Success: test_hanoi') |
class CoprChroot(db.Model, helpers.Serializer):
id = db.Column('id', db.Integer, primary_key=True)
__table_args__ = (db.UniqueConstraint('mock_chroot_id', 'copr_id', name='copr_chroot_mock_chroot_id_copr_id_uniq'),)
buildroot_pkgs = db.Column(db.Text)
repos = db.Column(db.Text, default='', server_default='', nullable=False)
mock_chroot_id = db.Column(db.Integer, db.ForeignKey('mock_chroot.id'), nullable=False)
mock_chroot = db.relationship('MockChroot', backref=db.backref('copr_chroots'))
copr_id = db.Column(db.Integer, db.ForeignKey('copr.id'), nullable=False, index=True)
copr = db.relationship('Copr', backref=db.backref('copr_chroots', single_parent=True, cascade='all,delete,delete-orphan'))
comps_zlib = db.Column(db.LargeBinary(), nullable=True)
comps_name = db.Column(db.String(127), nullable=True)
module_toggle = db.Column(db.Text, nullable=True)
with_opts = db.Column(db.Text, default='', server_default='', nullable=False)
without_opts = db.Column(db.Text, default='', server_default='', nullable=False)
delete_after = db.Column(db.DateTime, index=True)
delete_notify = db.Column(db.DateTime, index=True)
bootstrap = db.Column(db.Text)
bootstrap_image = db.Column(db.Text)
isolation = db.Column(db.Text, default='unchanged')
deleted = db.Column(db.Boolean, default=False, index=True)
def update_comps(self, comps_xml):
self.comps_zlib = zlib.compress(comps_xml)
    @property
    def buildroot_pkgs_list(self):
return (self.buildroot_pkgs or '').split()
    @property
    def repos_list(self):
return (self.repos or '').split()
    @property
    def comps(self):
if self.comps_zlib:
return zlib.decompress(self.comps_zlib).decode('utf-8')
    @property
    def name(self):
return self.mock_chroot.name
    @property
    def full_name(self):
return '{0}/{1}'.format(self.copr.full_name, self.name)
    @property
    def is_active(self):
return self.mock_chroot.is_active
    @property
    def delete_status(self):
if self.deleted:
if (not self.delete_after):
return ChrootDeletionStatus('deleted')
if (self.delete_after < datetime.datetime.now()):
return ChrootDeletionStatus('expired')
return ChrootDeletionStatus('preserved')
if (not self.is_active):
if ((not self.delete_after) and (not self.delete_notify)):
return ChrootDeletionStatus('deactivated')
if (not self.delete_notify):
return ChrootDeletionStatus('preserved')
if (not self.delete_after):
return ChrootDeletionStatus('deleted')
if (self.delete_after < datetime.datetime.now()):
return ChrootDeletionStatus('expired')
return ChrootDeletionStatus('preserved')
if ((not self.delete_after) and (not self.delete_notify)):
return ChrootDeletionStatus('active')
raise RuntimeError("Undefined status, this shouldn't happen")
    @property
    def delete_status_str(self):
return ChrootDeletionStatus(self.delete_status)
    @property
    def delete_after_expired(self):
return (self.delete_status in [ChrootDeletionStatus('expired'), ChrootDeletionStatus('deleted')])
    @property
    def delete_after_days(self):
if (not self.delete_after):
return None
now = datetime.datetime.now()
days = (self.delete_after - now).days
return (days if (days > 0) else 0)
    @property
    def delete_after_humanized(self):
if (self.delete_after is None):
return None
if self.delete_after_expired:
return 'To be removed in next cleanup'
delta = (self.delete_after - datetime.datetime.now())
if delta.days:
return '{0} days'.format(delta.days)
hours = int(round((delta.seconds / 3600)))
if hours:
return '{0} hours'.format(hours)
        return 'less than an hour'
    @property
    def module_setup_commands(self):
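        # module_toggle is a comma-separated list of module names; a leading '!'
        # marks a module to disable rather than enable.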
commands = []
modules = (self.module_toggle.split(',') if self.module_toggle else [])
for m in modules:
m = m.strip()
mod_tuple = ({'disable': m[1:]} if (m[0] == '!') else {'enable': m})
commands.append(mod_tuple)
return commands
def to_dict(self):
options = {'__columns_only__': ['buildroot_pkgs', 'repos', 'comps_name', 'copr_id', 'with_opts', 'without_opts']}
d = super(CoprChroot, self).to_dict(options=options)
d['mock_chroot'] = self.mock_chroot.name
return d
    @property
    def bootstrap_setup(self):
settings = {}
settings['bootstrap'] = self.copr.bootstrap
if self.bootstrap_changed:
settings['bootstrap'] = self.bootstrap
if (settings['bootstrap'] == 'custom_image'):
settings['bootstrap_image'] = self.bootstrap_image
if (settings['bootstrap'] in [None, 'default']):
return {}
return settings
    @property
    def bootstrap_changed(self):
return (self.bootstrap and (self.bootstrap != 'unchanged'))
    @property
    def isolation_setup(self):
settings = {'isolation': self.copr.isolation}
if (self.isolation and (self.isolation != 'unchanged')):
settings['isolation'] = self.isolation
if (settings['isolation'] in [None, 'default']):
return {}
return settings |
def ensureTensorFlush(tensors: Union[(List[torch.Tensor], torch.Tensor)]) -> float:
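    # Reading a single element back with .item() blocks until the device work
    # that produced the tensor(s) has finished, flushing pending computation.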
x = None
if (isinstance(tensors, list) and (len(tensors) > 0) and (len(tensors[(- 1)]) > 0)):
x = tensors[(- 1)][(- 1)].item()
elif (isinstance(tensors, torch.Tensor) and (tensors.nelement() > 0)):
x = tensors[(- 1)].item()
return x |
class OptionSeriesPictorialSonificationDefaultspeechoptionsPointgrouping(Options):
    @property
    def algorithm(self):
        return self._config_get('last')
    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)
    @property
    def enabled(self):
        return self._config_get(True)
    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def groupTimespan(self):
        return self._config_get(15)
    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)
    @property
    def prop(self):
        return self._config_get('y')
    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False) |
class EventDetail(ResourceDetail):
def before_get(self, args, kwargs):
kwargs = get_id(kwargs)
if (is_logged_in() and has_access('is_coorganizer', event_id=kwargs['id'])):
self.schema = EventSchema
else:
self.schema = EventSchemaPublic
def before_get_object(self, view_kwargs):
get_id(view_kwargs)
def after_get_object(self, event, view_kwargs):
if (event and (event.state == Event.State.DRAFT)):
if ((not is_logged_in()) or (not has_access('is_coorganizer', event_id=event.id))):
raise ObjectNotFound({'parameter': '{id}'}, 'Event: not found')
def before_patch(self, args, kwargs, data=None):
user = User.query.filter_by(id=current_user.id).one()
validate_event(user, data)
def before_update_object(self, event, data, view_kwargs):
g.event_name = event.name
is_date_updated = ((data.get('starts_at') != event.starts_at) or (data.get('ends_at') != event.ends_at))
is_draft_published = ((event.state == Event.State.DRAFT) and (data.get('state') == Event.State.PUBLISHED))
is_event_restored = (event.deleted_at and (not data.get('deleted_at')))
if (is_date_updated or is_draft_published or is_event_restored):
validate_date(event, data)
if data.get('is_document_enabled'):
d = data.get('document_links')
if d:
for document in d:
if ((not document.get('name')) or (not document.get('link'))):
raise UnprocessableEntityError({'pointer': '/'}, 'Enter required fields link and name')
if (has_access('is_admin') and (data.get('deleted_at') != event.deleted_at)):
if ((len(event.orders) != 0) and (not has_access('is_super_admin'))):
raise ForbiddenError({'source': ''}, 'Event associated with orders cannot be deleted')
event.deleted_at = data.get('deleted_at')
if (data.get('original_image_url') and (data['original_image_url'] != event.original_image_url)):
start_image_resizing_tasks(event, data['original_image_url'])
if (data.get('group') != event.group_id):
if event.is_announced:
event.is_announced = False
save_to_db(event)
def after_update_object(self, event, data, view_kwargs):
if (event.name != g.event_name):
from .helpers.tasks import rename_chat_room
rename_chat_room.delay(event.id)
if ((event.state == Event.State.PUBLISHED) and event.schedule_published_on):
start_export_tasks(event)
else:
clear_export_urls(event)
decorators = (api.has_permission('is_coorganizer', methods='PATCH,DELETE', fetch='id', fetch_as='event_id', model=Event),)
schema = EventSchema
data_layer = {'session': db.session, 'model': Event, 'methods': {'before_update_object': before_update_object, 'before_get_object': before_get_object, 'after_get_object': after_get_object, 'after_update_object': after_update_object, 'before_patch': before_patch}} |
class OptionSeriesPyramid3dSonificationDefaultinstrumentoptionsMappingTime(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class FlaskServer(AbstractServer):
def __init__(self, *args, **kwargs):
global app
self._app = app
self._server = None
self._serving = None
super().__init__(*args, **kwargs)
def _open(self, host, port, **kwargs):
if port:
try:
port = int(port)
except ValueError:
port = port_hash(port)
else:
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            preferred_port = port_hash('Flexx')
            for i in range(8):
                port = (preferred_port + i)
try:
a_socket.bind((host, port))
except Exception:
continue
a_socket.close()
break
else:
raise RuntimeError('No port found to start flask')
self._serving = (host, port)
manager.loop = self._loop
        async def _thread_switch():
            # Keep yielding control so the Flask worker thread can run
            # alongside the event loop.
            while True:
                time.sleep(0)
                (await asyncio.sleep(1e-09))
        asyncio.run_coroutine_threadsafe(_thread_switch(), self._loop)
def start(self):
sockets = Sockets(app)
register_blueprints(self._app, sockets)
def RunServer():
self._server = pywsgi.WSGIServer(self._serving, self._app, handler_class=WebSocketHandler)
proto = self.protocol
logger.info(('Serving apps at %s://%s:%i/' % (proto, *self._serving)))
self._server.serve_forever()
_thread = threading.Thread(target=RunServer)
_thread.daemon = True
_thread.start()
super().start()
def start_serverless(self):
super().start()
def _close(self):
self._server.stop()
    @property
    def app(self):
        return self._app
    @property
    def server(self):
        return self._server
    @property
    def protocol(self):
        return ' |
def log_fortianalyzer2_override_setting(data, fos):
vdom = data['vdom']
log_fortianalyzer2_override_setting_data = data['log_fortianalyzer2_override_setting']
filtered_data = underscore_to_hyphen(filter_log_fortianalyzer2_override_setting_data(log_fortianalyzer2_override_setting_data))
return fos.set('log.fortianalyzer2', 'override-setting', data=filtered_data, vdom=vdom) |
@click.command()
@click.pass_context
@click.argument('fides_dir', default='.', type=click.Path(exists=True))
@click.option('--opt-in', is_flag=True, help='Automatically opt-in to anonymous usage analytics.')
def init(ctx: click.Context, fides_dir: str, opt_in: bool) -> None:
executed_at = datetime.now(timezone.utc)
config = ctx.obj['CONFIG']
click.echo(FIDES_ASCII_ART)
click.echo('Initializing fides...')
(config, config_path) = create_and_update_config_file(config, fides_dir, opt_in=opt_in)
print_divider()
send_init_analytics(config.user.analytics_opt_out, config_path, executed_at)
echo_green('fides initialization complete.') |
class OptionPlotoptionsHistogramStatesSelect(Options):
    # Getter/setter pairs restored as properties; without the decorators each
    # second definition would shadow its getter.
    @property
    def animation(self) -> 'OptionPlotoptionsHistogramStatesSelectAnimation':
        return self._config_sub_data('animation', OptionPlotoptionsHistogramStatesSelectAnimation)
    @property
    def borderColor(self):
        return self._config_get('#000000')
    @borderColor.setter
    def borderColor(self, text: str):
        self._config(text, js_type=False)
    @property
    def color(self):
        return self._config_get('#cccccc')
    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)
    @property
    def enabled(self):
        return self._config_get(True)
    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False) |
class UpdateView(GenericModelView):
success_url = None
template_name_suffix = '_form'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form(instance=self.object)
context = self.get_context_data(form=form)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form(data=request.POST, files=request.FILES, instance=self.object)
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
def form_valid(self, form):
self.object = form.save()
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
context = self.get_context_data(form=form)
return self.render_to_response(context)
def get_success_url(self):
try:
return (self.success_url or self.object.get_absolute_url())
except AttributeError:
msg = "No URL to redirect to. '%s' must provide 'success_url' or define a 'get_absolute_url()' method on the Model."
raise ImproperlyConfigured((msg % self.__class__.__name__)) |
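# Illustrative subclass sketch (model and URL are hypothetical):
# class ArticleUpdateView(UpdateView):
#     model = Article              # resolved by the GenericModelView machinery
#     success_url = '/articles/'   # otherwise Article.get_absolute_url() is used
# Without either success_url or get_absolute_url(), form_valid() ends in
# ImproperlyConfigured, as implemented above. |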
class SSLVerifier(object):
class VerificationError(ValueError):
pass
user_cert = None
ca_cert = None
def __init__(self, cert_user=None, cert_ca=None):
self.__class__.user_cert = cert_user
self.__class__.ca_cert = cert_ca
    @classmethod
    def verify(cls):
if (cls.user_cert is None):
raise OSError('No user certificate supplied.')
cls.verify_against_revoked()
result = cls.verify_against_ca()[0].split('stdin:')[1]
return result.strip()
    @classmethod
    def verify_expired(cls, attime=None):
        # Compute the default at call time; a def-time default would freeze
        # the timestamp when the class is created.
        if (attime is None):
            attime = str(time.time())
        if (cls.user_cert is None):
            raise OSError('No user certificate supplied.')
echo_process = subprocess.Popen(['echo', cls.user_cert], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
a = subprocess.Popen(['openssl', 'verify', '-CAfile', cls.ca_cert, '-x509_strict', '-attime', attime], stdin=echo_process.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
(outs, errs) = a.communicate()
if ((outs is None) and (errs is None)):
raise OSError('Verification error in executing command "{}"'.format('openssl verify -CAfile {} -x509_strict'.format(cls.ca_cert)))
if (a.returncode != 0):
raise OSError('Verification error in executing command "{}". Error: {}, returncode: {}'.format('openssl verify -CAfile {} -x509_strict'.format(cls.ca_cert), errs.decode('utf-8').replace('\n', ''), a.returncode))
d = [b for b in outs.decode('utf-8').split('\n') if (b != '')]
        return ('OK' not in d[0])
    @classmethod
    def get_revoked_fingerprint(cls):
REVOKED_CERTS = []
return REVOKED_CERTS
    @classmethod
    def verify_against_revoked(cls):
revoked_fingerprints = cls.get_revoked_fingerprint()
fp = cls.get_fingerprint()
if (fp in revoked_fingerprints):
raise SSLVerifier.VerificationError(str(), 'matches revoked fingerprint', fp)
    @classmethod
    def verify_against_ca(cls):
echo_process = subprocess.Popen(['echo', cls.user_cert], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
args = 'openssl verify -CAfile {}'.format(cls.ca_cert)
if (utils.get_open_ssl_version(version_string=False)[0] < 3):
args += ' -x509_strict'
a = subprocess.Popen(args.split(), stdin=echo_process.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
(outs, errs) = a.communicate()
if ((outs is None) and (errs is None)):
raise OSError('Verification error in executing command "{}"'.format(args))
if (a.returncode != 0):
raise OSError('Verification error in executing command "{}". Error: {}, returncode: {}'.format(args, errs.decode('utf-8').replace('\n', ''), a.returncode))
d = [b for b in outs.decode('utf-8').split('\n') if (b != '')]
if ('OK' not in d[0]):
raise SSLVerifier.VerificationError(str(), 'failed verification', errs)
return d
    @classmethod
    def get_x509(cls, cmd):
        '\n Common x509 options:\n -serial - print serial number value\n -subject_hash - print subject hash value\n -subject_hash_old - print old-style (MD5) subject hash value\n -issuer_hash - print issuer hash value\n -issuer_hash_old - print old-style (MD5) issuer hash value\n -hash - synonym for -subject_hash\n -subject - print subject DN\n -issuer - print issuer DN\n -email - print email address(es)\n -startdate - notBefore field\n -enddate - notAfter field\n -purpose - print out certificate purposes\n -dates - both Before and After dates\n -modulus - print the RSA key modulus\n -pubkey - output the public key\n -fingerprint - print the certificate fingerprint\n -alias - output certificate alias\n '
if (cls.user_cert is None):
raise OSError('No user certificate supplied.')
echo_process = subprocess.Popen(['echo', cls.user_cert], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
a = subprocess.Popen(['openssl', 'x509', '-noout', '-nameopt', 'sep_comma_plus_space', cmd], stdin=echo_process.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
(outs, errs) = a.communicate()
if ((outs is None) and (errs is None)):
raise OSError('Error in executing command "{}"'.format('openssl x509 -noout {}'.format(cmd)))
if (a.returncode != 0):
raise OSError('Error in executing command "{}". Error: {}, return code: {}'.format(cmd, errs.decode('utf-8').replace('\n', ''), a.returncode))
d = [b for b in outs.decode('utf-8').split('\n') if (b != '')]
return d
    @classmethod
    def get_serial(cls):
cmd = '-serial'
serial = cls.get_x509(cmd=cmd)[0].split('serial=')[1].strip()
return serial
    @classmethod
    def get_purposes(cls):
cmd = '-purpose'
purposes = cls.get_x509(cmd=cmd)
return purposes
    @classmethod
    def get_issuer_common_name(cls):
cmd = '-issuer'
issuer = cls.get_x509(cmd=cmd)[0].split('issuer=')[1].strip()
return issuer
    @classmethod
    def get_subject(cls):
cmd = '-subject'
subject_text = cls.get_x509(cmd=cmd)[0].split('subject=')[1].strip()
subject = subject_text.strip().split(', ')
country = next(filter((lambda x: x.startswith('C=')), subject), None)
state = next(filter((lambda x: x.startswith('ST=')), subject), None)
organisation = next(filter((lambda x: x.startswith('O=')), subject), None)
commonName = next(filter((lambda x: x.startswith('CN=')), subject), None)
email = next(filter((lambda x: x.startswith('emailAddress=')), subject), None)
subject_dict = {'country': ('' if (country is None) else country.split('C=')[1]), 'state': ('' if (state is None) else state.split('ST=')[1]), 'organisation': ('' if (organisation is None) else organisation.split('O=')[1]), 'commonName': ('' if (commonName is None) else commonName.split('CN=')[1]), 'email': ('' if (email is None) else email.split('emailAddress=')[1])}
return subject_dict
    @classmethod
    def get_fingerprint(cls):
cmd = '-fingerprint'
fp = cls.get_x509(cmd=cmd)[0].split('SHA1 Fingerprint=')[1].strip()
return fp
    @classmethod
    def get_pubkey(cls):
cmd = '-pubkey'
pk = cls.get_x509(cmd=cmd)[0].strip()
return pk
    @classmethod
    def get_startdate(cls):
cmd = '-startdate'
stdt = cls.get_x509(cmd=cmd)[0].split('notBefore=')[1].strip()
return stdt
    @classmethod
    def get_enddate(cls):
cmd = '-enddate'
enddt = cls.get_x509(cmd=cmd)[0].split('notAfter=')[1].strip()
return enddt
    @classmethod
    def is_expired(cls):
        enddt = cls.get_enddate()
        dt_format = '%b %d %X %Y %Z'
        cert_time = time.mktime(datetime.datetime.strptime(enddt, dt_format).timetuple())
        curr_time = time.time()
        return (cert_time < curr_time)
    @classmethod
    def set_ca_cert(cls, cert):
cls.ca_cert = cert
    @classmethod
    def set_user_cert(cls, cert):
cls.user_cert = cert |
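# Illustrative usage sketch (PEM file names are placeholders; the class
# shells out to the system 'openssl' binary, assumed to be on PATH):
# SSLVerifier.set_ca_cert(open('ca.pem').read())
# SSLVerifier.set_user_cert(open('user.pem').read())
# print(SSLVerifier.get_subject())   # parsed subject dict
# print(SSLVerifier.is_expired())    # True if notAfter is in the past
# result = SSLVerifier.verify()      # raises VerificationError on failure |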
def parse_args(parser):
args = parser.parse_args()
if ('EGGNOG_DATA_DIR' in os.environ):
set_data_path(os.environ['EGGNOG_DATA_DIR'])
if args.data_dir:
set_data_path(args.data_dir)
if args.version:
version = ''
try:
version = get_full_version_info()
except Exception:
version = get_version()
print(version)
sys.exit(0)
args.call_info = get_call_info()
if args.list_taxa:
print_taxa()
sys.exit(0)
if (args.cpu == 0):
args.cpu = multiprocessing.cpu_count()
multiprocessing.set_start_method(args.mp_start_method)
if ((args.resume == True) and (args.override == True)):
parser.error('Only one of --resume or --override is allowed.')
if ((args.training_genome is not None) and (args.training_file is None)):
parser.error('"--training_genome requires --training_file"')
if ((args.training_genome is None) and (args.training_file is not None)):
if (not os.path.isfile(args.training_file)):
parser.error('"--training_file must point to an existing file, if no --training_genome is provided."')
if ((args.mode == SEARCH_MODE_DIAMOND) or (args.mode == SEARCH_MODE_NOVEL_FAMS)):
dmnd_db = get_eggnog_dmnd_db(args.dmnd_db, args.mode, get_data_path())
if (not pexists(dmnd_db)):
print(colorify(('DIAMOND database %s not present. Use download_eggnog_database.py to fetch it' % dmnd_db), 'red'))
raise EmapperException()
if (args.input is not None):
if (args.annotate_hits_table is not None):
print(colorify(f'--annotate_hits_table will be ignored, due to -m {args.mode}', 'blue'))
args.annotate_hits_table = None
elif (args.annotate_hits_table is not None):
print(colorify(f'Assuming -m {SEARCH_MODE_NO_SEARCH}', 'blue'))
args.mode = SEARCH_MODE_NO_SEARCH
else:
parser.error('An input fasta file is required (-i)')
if (not args.output):
parser.error('An output project name is required (-o)')
elif (args.mode == SEARCH_MODE_MMSEQS2):
mmseqs_db = (args.mmseqs_db if args.mmseqs_db else get_eggnog_mmseqs_db())
if (not pexists(mmseqs_db)):
print(colorify(('MMseqs2 database %s not present. Use download_eggnog_database.py to fetch it' % mmseqs_db), 'red'))
raise EmapperException()
if (not args.input):
parser.error('An input fasta file is required (-i)')
if (not args.output):
parser.error('An output project name is required (-o)')
if (args.annotate_hits_table is not None):
print(colorify(f'--annotate_hits_table will be ignored, due to -m {SEARCH_MODE_MMSEQS2}', 'blue'))
args.annotate_hits_table = None
elif (args.mode == SEARCH_MODE_HMMER):
if (not args.input):
parser.error('An input file is required (-i)')
if (not args.output):
parser.error('An output project name is required (-o)')
if (not args.db):
parser.error('HMMER mode requires a target database (-d, --database).')
if (args.itype == ITYPE_CDS):
args.translate = True
if (((args.itype == ITYPE_GENOME) or (args.itype == ITYPE_META)) and (args.genepred == GENEPRED_MODE_SEARCH)):
parser.error('HMMER mode is not compatible with "--genepred search" option.')
if (args.annotate_hits_table is not None):
print(colorify(f'--annotate_hits_table will be ignored, due to -m {SEARCH_MODE_HMMER}', 'blue'))
args.annotate_hits_table = None
if (args.clean_overlaps is not None):
if (args.clean_overlaps == 'none'):
args.clean_overlaps = None
elif (args.mode == SEARCH_MODE_CACHE):
if (args.cache_file is None):
parser.error('A file with annotations and md5 of queries is required (-c FILE)')
if (args.decorate_gff != DECORATE_GFF_NONE):
print(colorify('WARNING: no GFF will be created for cache-based annotations. It is not implemented yet, sorry.', 'red'))
if (args.no_annot == True):
parser.error(f'Cache mode (-m {SEARCH_MODE_CACHE}) should be used to annotate.')
elif (args.mode == SEARCH_MODE_NO_SEARCH):
if ((args.no_annot == False) and (not args.annotate_hits_table)):
parser.error(f'No search mode (-m {SEARCH_MODE_NO_SEARCH}) requires a hits table to annotate (--annotate_hits_table FILE.seed_orthologs)')
if ((args.md5 == True) and (args.input is None)):
parser.error(f'--md5 requires an input FASTA file (-i FASTA).')
else:
parser.error(f'unrecognized search mode (-m {args.mode})')
args.dmnd_evalue = args.mmseqs_evalue = args.hmm_evalue = args.evalue
    args.dmnd_score = args.mmseqs_score = args.hmm_score = args.score
args.qcov = args.query_cover
if ((args.no_annot == False) or (args.report_orthologs == True)):
if ((not pexists(get_eggnogdb_file())) and (args.mode != SEARCH_MODE_NOVEL_FAMS)):
print(colorify('Annotation database data/eggnog.db not present. Use download_eggnog_database.py to fetch it', 'red'))
raise EmapperException()
args.tax_scope_ids = parse_tax_scope(args.tax_scope)
if (args.target_taxa is not None):
args.target_taxa = args.target_taxa.split(',')
if (args.excluded_taxa is not None):
args.excluded_taxa = args.excluded_taxa.split(',')
if (args.go_evidence == 'experimental'):
args.go_evidence = set(['EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'IEP'])
args.go_excluded = set(['ND', 'IEA'])
elif (args.go_evidence == 'non-electronic'):
args.go_evidence = None
args.go_excluded = set(['ND', 'IEA'])
elif (args.go_evidence == 'all'):
args.go_evidence = None
args.go_excluded = None
else:
raise ValueError('Invalid --go_evidence value')
if (args.pfam_realign == PFAM_REALIGN_NONE):
pass
elif ((args.pfam_realign == PFAM_REALIGN_REALIGN) or (args.pfam_realign == PFAM_REALIGN_DENOVO)):
if (not args.input):
parser.error(f'An input fasta file is required (-i) for --pfam_realign {args.pfam_realign}')
else:
raise ValueError(f'Invalid --pfam_realign option {args.pfam_realign}')
total_workers = (args.num_workers * args.num_servers)
if (args.cpu < total_workers):
parser.error(f'Less cpus ({args.cpu}) than total workers ({total_workers}) were specified.')
if ((args.cpu % total_workers) != 0):
parser.error(f'Number of cpus ({args.cpu}) must be a multiple of total workers ({total_workers}).')
args.cpus_per_worker = int((args.cpu / total_workers))
return args |
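# Worked example of the worker/CPU arithmetic above (values are illustrative):
# --cpu 16 with num_workers=2 and num_servers=2 -> total_workers = 4;
# 16 >= 4 and 16 % 4 == 0, so cpus_per_worker = 16 // 4 = 4.
# --cpu 6 with the same workers fails: 6 % 4 != 0 triggers parser.error. |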
class Health(object):
    @staticmethod
    def Check(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
return grpc.experimental.unary_unary(request, target, '/grpc.health.v1.Health/Check', health__pb2.HealthCheckRequest.SerializeToString, health__pb2.HealthCheckResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Watch(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
return grpc.experimental.unary_stream(request, target, '/grpc.health.v1.Health/Watch', health__pb2.HealthCheckRequest.SerializeToString, health__pb2.HealthCheckResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) |
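# Illustrative call sketch (the target address is hypothetical; this uses the
# experimental no-channel API the stubs above are built on):
# from grpc_health.v1 import health_pb2
# response = Health.Check(health_pb2.HealthCheckRequest(service=''),
#                         'localhost:50051', insecure=True)
# print(response.status)  # e.g. SERVING |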
class CmdUnloggedinLook(Command):
key = syscmdkeys.CMD_LOGINSTART
locks = 'cmd:all()'
arg_regex = '^$'
def func(self):
menu_nodes = {'node_enter_username': node_enter_username, 'node_enter_password': node_enter_password, 'node_quit_or_login': node_quit_or_login}
MenuLoginEvMenu(self.caller, menu_nodes, startnode='node_enter_username', auto_look=False, auto_quit=False, cmd_on_exit=None) |
def add_new_changes(prev_changes: str):
changes = set(prev_changes.split('\n'))
with open(CHANGELOG_FILE, encoding='utf-8') as _file:
add_changes = False
line = _file.readline()
while line:
line = line.strip()
if (line == VERSION):
add_changes = True
elif add_changes:
if (line == ''):
break
changes.add(line)
line = _file.readline()
if (not add_changes):
print(f'CHANGELOG does not contain changes for {VERSION}...')
input('Press enter to try again...')
return add_new_changes(prev_changes)
return '\n'.join(sorted(changes, key=(lambda item: item.casefold()))) |
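# Illustrative CHANGELOG layout the parser above expects (VERSION marks the
# start of a section; the first blank line ends it):
# 1.2.3
# - Fix frobnication
# - Add --verbose flag
#
# 1.2.2
# ...
# add_new_changes('- Older entry') would merge that entry with the 1.2.3
# lines and return them case-insensitively sorted. |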
def getData(s, address):
    # Request a record from the device at the given address using the
    # 'H<address>\r' command (protocol details inferred from usage).
    s.flush()
    n = s.write(bytearray(('H%d\r' % address), 'utf-8'))
    # Read the first response bytes (presumably an echo/ack; printed for debugging).
    buf = s.read(3)
    print(buf)
    buf = s.read(1)
    print(buf)
    # Drain the variable-length payload: stop once the device has been
    # silent for a full second.
    buf = bytearray()
    while True:
        if (not s.in_waiting):
            time.sleep(1)
            if (not s.in_waiting):
                break
        buf += s.read(s.in_waiting)
    print(('read %d bytes' % len(buf)))
    return buf |
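# Illustrative caller sketch (port name and baud rate are assumptions;
# getData expects a pyserial-like object with flush/write/read/in_waiting):
# import serial
# with serial.Serial('/dev/ttyUSB0', 9600, timeout=1) as s:
#     payload = getData(s, 1)  # sends 'H1\r' and drains the reply |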
class AutumnWindAction(UserAction):
def __init__(self, source, target_list):
self.source = source
self.target = source
self.target_list = target_list
def apply_action(self):
g = self.game
src = self.source
for p in self.target_list:
g.process_action(AutumnWindEffect(src, p))
return True |
class KnowledgeSpaceEntity(Model):
__tablename__ = 'knowledge_space'
id = Column(Integer, primary_key=True)
name = Column(String(100))
vector_type = Column(String(100))
desc = Column(String(100))
owner = Column(String(100))
context = Column(Text)
gmt_created = Column(DateTime)
gmt_modified = Column(DateTime)
def __repr__(self):
return f"KnowledgeSpaceEntity(id={self.id}, name='{self.name}', vector_type='{self.vector_type}', desc='{self.desc}', owner='{self.owner}' context='{self.context}', gmt_created='{self.gmt_created}', gmt_modified='{self.gmt_modified}')" |
class Settings(object):
def __init__(self, common):
self.c = common
self.system = self.c.os
if (self.system == 'Windows'):
appdata = os.environ['APPDATA']
self.appdata_path = '{0}\\gpgsync'.format(appdata)
elif (self.system == 'Darwin'):
self.appdata_path = os.path.expanduser('~/Library/Application Support/GPG Sync')
else:
self.appdata_path = os.path.expanduser('~/.config/gpgsync')
self.c.log('Settings', '__init__', 'appdata_path: {}'.format(self.appdata_path))
self.load()
def get_appdata_path(self):
return self.appdata_path
def load(self):
start_new_settings = False
resave_settings = False
settings_file = os.path.join(self.appdata_path, 'settings.json')
if os.path.isfile(settings_file):
try:
self.settings = json.load(open(settings_file, 'r'))
load_settings = True
self.c.log('Settings', 'load', 'settings loaded from {}'.format(settings_file))
if ('keylists' in self.settings):
self.keylists = [Keylist(self.c).load(k) for k in self.settings['keylists']]
elif ('endpoints' in self.settings):
self.keylists = [Keylist(self.c).load(k) for k in self.settings['endpoints']]
resave_settings = True
self.c.log('Settings', 'load', "migrating settings from 'endpoints' to 'keylists'")
else:
self.keylists = []
if ('run_automatically' in self.settings):
self.run_automatically = self.settings['run_automatically']
else:
self.run_automatically = True
if ('run_autoupdate' in self.settings):
self.run_autoupdate = self.settings['run_autoupdate']
else:
self.run_autoupdate = True
if ('last_update_check' in self.settings):
try:
self.last_update_check = date_parser.parse(self.settings['last_update_check'])
                    except Exception:
self.last_update_check = None
else:
self.last_update_check = None
if ('last_update_check_err' in self.settings):
self.last_update_check_err = self.settings['last_update_check_err']
else:
self.last_update_check_err = False
if ('update_interval_hours' in self.settings):
self.update_interval_hours = str.encode(self.settings['update_interval_hours'])
else:
self.update_interval_hours = b'12'
if ('automatic_update_use_proxy' in self.settings):
self.automatic_update_use_proxy = self.settings['automatic_update_use_proxy']
else:
self.automatic_update_use_proxy = False
if ('automatic_update_proxy_host' in self.settings):
self.automatic_update_proxy_host = str.encode(self.settings['automatic_update_proxy_host'])
else:
self.automatic_update_proxy_host = b'127.0.0.1'
if ('automatic_update_proxy_port' in self.settings):
self.automatic_update_proxy_port = str.encode(self.settings['automatic_update_proxy_port'])
else:
self.automatic_update_proxy_port = b'9050'
self.configure_run_automatically()
            except Exception:
self.c.log('Settings', 'load', 'error loading settings file, starting from scratch')
print('Error loading settings file, starting from scratch')
start_new_settings = True
else:
self.c.log('Settings', 'load', "settings file doesn't exist")
if (not self.migrate_settings_010_011()):
start_new_settings = True
if start_new_settings:
self.keylists = []
self.run_automatically = True
self.run_autoupdate = True
self.last_update_check = None
self.last_update_check_err = False
self.update_interval_hours = b'12'
self.automatic_update_use_proxy = False
self.automatic_update_proxy_host = b'127.0.0.1'
self.automatic_update_proxy_port = b'9050'
self.save()
self.configure_run_automatically()
if resave_settings:
self.save()
def save(self):
self.c.log('Settings', 'save')
self.settings = {'keylists': [e.serialize() for e in self.keylists], 'run_automatically': self.run_automatically, 'run_autoupdate': self.run_autoupdate, 'last_update_check': self.last_update_check, 'last_update_check_err': self.last_update_check_err, 'update_interval_hours': self.update_interval_hours, 'automatic_update_use_proxy': self.automatic_update_use_proxy, 'automatic_update_proxy_host': self.automatic_update_proxy_host, 'automatic_update_proxy_port': self.automatic_update_proxy_port}
if (not os.path.exists(self.appdata_path)):
os.makedirs(self.appdata_path)
with open(os.path.join(self.appdata_path, 'settings.json'), 'w') as settings_file:
json.dump(self.settings, settings_file, default=self.c.serialize_settings, indent=4)
self.configure_run_automatically()
return True
def configure_run_automatically(self):
self.c.log('Settings', 'configure_run_automatically')
autorun_dir = None
if (self.system == 'Darwin'):
share_filename = 'org.firstlook.gpgsync.plist'
autorun_dir = os.path.expanduser('~/Library/LaunchAgents')
elif (self.system == 'Windows'):
share_filename = 'GPG Sync.lnk'
autorun_dir = os.path.join(os.environ['APPDATA'], 'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
elif (self.system == 'Linux'):
share_filename = 'gpgsync.desktop'
autorun_dir = os.path.expanduser('~/.config/autostart')
if autorun_dir:
if (not os.path.exists(autorun_dir)):
os.makedirs(autorun_dir)
autorun_filename = os.path.join(autorun_dir, share_filename)
if self.run_automatically:
if (self.system == 'Windows'):
src_filename = os.path.join(os.path.join(os.environ['PROGRAMDATA'], 'Microsoft\\Windows\\Start Menu\\Programs'), share_filename)
if os.path.exists(src_filename):
shutil.copyfile(src_filename, autorun_filename)
else:
self.c.log('Settings', 'configure_run_automatically', 'GPG Sync not installed, skipping run automatically')
else:
                    with open(self.c.get_resource_path(share_filename)) as src:
                        buf = src.read()
                    with open(autorun_filename, 'w') as dst:
                        dst.write(buf)
elif os.path.exists(autorun_filename):
os.remove(autorun_filename)
    def migrate_settings_010_011(self):
        '\n If necessary, migrate settings from 0.1.0 (in an old location and in pickle\n format) to 0.1.1 (in a new location and in json format). Should only run\n once per user.\n '
old_settings_path = os.path.expanduser('~/.gpgsync')
if os.path.isfile(old_settings_path):
self.c.log('Settings', 'migrate_settings_010_011', 'there is an old settings file, converting it to a new one')
pickle_data = open(old_settings_path, 'rb').read()
pickle_data = pickle_data.replace(b'gpgsync.endpoint\nEndpoint\n', b'gpgsync.settings\nOldEndpoint\n')
try:
settings = pickle.loads(pickle_data)
self.c.log('Settings', 'migrate_settings_010_011', 'settings loaded from {}'.format(old_settings_path))
if ('endpoints' in settings):
self.endpoints = []
for old_e in settings['endpoints']:
e = Keylist(self.c)
e.verified = old_e.verified
e.fingerprint = old_e.fingerprint
e.url = old_e.url
e.sig_url = old_e.sig_url
e.keyserver = old_e.keyserver
e.use_proxy = old_e.use_proxy
e.proxy_host = old_e.proxy_host
e.proxy_port = old_e.proxy_port
e.last_checked = old_e.last_checked
e.last_synced = old_e.last_synced
e.last_failed = old_e.last_failed
e.error = old_e.error
e.warning = old_e.warning
self.endpoints.append(e)
else:
self.endpoints = []
if ('run_automatically' in settings):
self.run_automatically = settings['run_automatically']
else:
self.run_automatically = True
if ('run_autoupdate' in settings):
self.run_autoupdate = settings['run_autoupdate']
else:
self.run_autoupdate = True
if ('last_update_check' in settings):
self.last_update_check = settings['last_update_check']
else:
self.last_update_check = None
if ('last_update_check_err' in settings):
self.last_update_check_err = settings['last_update_check_err']
else:
self.last_update_check_err = False
if ('update_interval_hours' in settings):
self.update_interval_hours = settings['update_interval_hours']
else:
self.update_interval_hours = b'12'
if ('automatic_update_use_proxy' in settings):
self.automatic_update_use_proxy = settings['automatic_update_use_proxy']
else:
self.automatic_update_use_proxy = False
if ('automatic_update_proxy_host' in settings):
self.automatic_update_proxy_host = settings['automatic_update_proxy_host']
else:
self.automatic_update_proxy_host = b'127.0.0.1'
if ('automatic_update_proxy_port' in settings):
self.automatic_update_proxy_port = settings['automatic_update_proxy_port']
else:
self.automatic_update_proxy_port = b'9050'
self.save()
os.remove(old_settings_path)
return True
            except Exception:
self.c.log('Settings', 'migrate_settings_010_011', 'exception thrown, just start over with settings')
return False
return False |
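# Illustrative usage sketch ('common' must provide .os, .log(...),
# .get_resource_path(...) and .serialize_settings; all assumed here):
# s = Settings(common)
# s.run_autoupdate = False
# s.save()  # rewrites settings.json and re-applies autorun configuration |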
class OptionSeriesCylinderMarkerStatesSelect(Options):
    # Getter/setter pairs restored as properties, matching the pattern of the
    # other Options subclasses above.
    @property
    def enabled(self):
        return self._config_get(True)
    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def fillColor(self):
        return self._config_get('#cccccc')
    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)
    @property
    def lineColor(self):
        return self._config_get('#000000')
    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)
    @property
    def lineWidth(self):
        return self._config_get(2)
    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    @property
    def radius(self):
        return self._config_get(None)
    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False) |
class Committee(BaseConcreteCommittee):
__table_args__ = {'extend_existing': True}
__tablename__ = 'ofec_committee_detail_mv'
sponsor_candidate_list = db.relationship('PacSponsorCandidate', primaryjoin='and_(\n foreign(PacSponsorCandidate.committee_id) == Committee.committee_id,\n )', lazy='joined') |
class TestValidatePathTests():
def test_path_does_not_exist(self):
with pytest.raises(ValueError) as exc:
config.validate_path('/does/not/exist')
assert (str(exc.value) == "'/does/not/exist' does not exist.")
def test_path_is_none(self):
with pytest.raises(ValueError) as exc:
config.validate_path(None)
assert (str(exc.value) == 'None does not exist.')
def test_path_exists(self):
result = config.validate_path(__file__)
assert (result == __file__)
assert isinstance(result, str) |
class TestRefResolverExceptions():
def test_cast_raise(self, monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', [''])
with pytest.raises(_SpockResolverError):
config = SpockBuilder(RefCastRaise, RefClass, desc='Test Builder')
config.generate()
def test_invalid_raise(self, monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', [''])
with pytest.raises(_SpockVarResolverError):
config = SpockBuilder(RefInvalid, RefClass, desc='Test Builder')
config.generate()
def test_not_spock(self, monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', [''])
with pytest.raises(_SpockVarResolverError):
config = SpockBuilder(RefNotSpockClsRef, RefClass, desc='Test Builder')
config.generate()
def test_ref_cycle(self, monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', [''])
with pytest.raises(_SpockInstantiationError):
config = SpockBuilder(RefCycle1, RefCycle2, RefCycle3, desc='Test Builder')
config.generate()
def test_self_cycle(self, monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', [''])
with pytest.raises(_SpockInstantiationError):
config = SpockBuilder(SelfCycle, desc='Test Builder')
config.generate() |