code stringlengths 281 23.7M |
|---|
class BMGInference():
    """Compiles an accumulated Bean Machine model into a BMG graph, runs
    inference on that graph, and converts the results back into
    MonteCarloSamples.

    NOTE(review): relies on module-level helpers imported elsewhere in this
    file (to_bmg_graph, rv_to_query, to_dot, prof, pr, ...).
    """

    # Enables the graph fixer that rewrites boolean observations on the
    # accumulated graph -- exact semantics live in BMGRuntime; TODO confirm.
    _fix_observe_true: bool = False
    # Profiler accumulator; populated only when _infer is asked to
    # produce a performance report.
    _pd: Optional[prof.ProfilerData] = None

    def __init__(self):
        pass

    def _begin(self, s: str) -> None:
        # Open a named profiler section; no-op when profiling is disabled.
        pd = self._pd
        if (pd is not None):
            pd.begin(s)

    def _finish(self, s: str) -> None:
        # Close a named profiler section; no-op when profiling is disabled.
        pd = self._pd
        if (pd is not None):
            pd.finish(s)

    def _accumulate_graph(
        self,
        queries: List[RVIdentifier],
        observations: Dict[RVIdentifier, torch.Tensor],
    ) -> BMGRuntime:
        """Trace the model and accumulate its graph.

        Returns the runtime; the accumulated graph is on its ``_bmg``
        attribute.
        """
        _verify_queries_and_observations(queries, observations, True)
        rt = BMGRuntime()
        rt._pd = self._pd
        bmg = rt.accumulate_graph(queries, observations)
        # Propagate the observe-true fixer flag onto the new graph.
        bmg._fix_observe_true = self._fix_observe_true
        return rt

    def _transpose_samples(self, raw):
        """Reshape one chain's raw BMG output from [sample][query] order
        into a per-query list of tensors shaped (1, num_samples, ...)."""
        self._begin(prof.transpose_samples)
        samples = []
        num_samples = len(raw)
        bmg_query_count = len(raw[0])
        # Transpose the list of per-sample rows into one tensor per query.
        transposed = [torch.tensor([x]) for x in zip(*raw)]
        assert (len(transposed) == bmg_query_count)
        assert (len(transposed[0]) == 1)
        assert (len(transposed[0][0]) == num_samples)
        for i in range(len(transposed)):
            t = transposed[i]
            if (len(t.shape) == 4):
                # 4-d result: either a trailing singleton dimension that is
                # squeezed away, or a matrix-valued sample whose last two
                # dimensions are swapped.
                if (t.shape[3] == 1):
                    assert (t.shape[0] == 1)
                    assert (t.shape[1] == num_samples)
                    samples.append(t.reshape(1, num_samples, t.shape[2]))
                else:
                    samples.append(t.transpose(2, 3))
            else:
                samples.append(t)
        assert (len(samples) == bmg_query_count)
        assert (len(samples[0]) == 1)
        assert (len(samples[0][0]) == num_samples)
        self._finish(prof.transpose_samples)
        return samples

    def _build_mcsamples(
        self,
        rv_to_query,
        samples,
        query_to_query_id,
        num_samples: int,
        num_chains: int,
        num_adaptive_samples: int,
    ) -> MonteCarloSamples:
        """Map per-query sample tensors back to their random variables and
        wrap them in a MonteCarloSamples object."""
        self._begin(prof.build_mcsamples)
        assert (len(samples) == num_chains)
        results = []
        for chain_num in range(num_chains):
            result: Dict[RVIdentifier, torch.Tensor] = {}
            for (rv, query) in rv_to_query.items():
                query_id = query_to_query_id[query]
                result[rv] = samples[chain_num][query_id]
            results.append(result)
        # A single chain is stacked rather than concatenated.
        if (num_chains == 1):
            mcsamples = MonteCarloSamples(results[0], num_adaptive_samples, stack_not_cat=True)
        else:
            mcsamples = MonteCarloSamples(results, num_adaptive_samples, stack_not_cat=False)
        self._finish(prof.build_mcsamples)
        return mcsamples

    def _infer(
        self,
        queries: List[RVIdentifier],
        observations: Dict[RVIdentifier, torch.Tensor],
        num_samples: int,
        num_chains: int = 1,
        num_adaptive_samples: int = 0,
        inference_type: InferenceType = InferenceType.NMC,
        produce_report: bool = True,
        skip_optimizations: Set[str] = default_skip_optimizations,
    ) -> Tuple[MonteCarloSamples, PerformanceReport]:
        """Accumulate the graph, run BMG inference, and return the samples
        together with a performance report (empty unless requested)."""
        if produce_report:
            self._pd = prof.ProfilerData()
        rt = self._accumulate_graph(queries, observations)
        bmg = rt._bmg
        report = pr.PerformanceReport()
        self._begin(prof.infer)
        generated_graph = to_bmg_graph(bmg, skip_optimizations)
        g = generated_graph.graph
        query_to_query_id = generated_graph.query_to_query_id
        samples = []
        # Skip graph inference entirely when there is nothing to query.
        if (len(query_to_query_id) != 0):
            g.collect_performance_data(produce_report)
            self._begin(prof.graph_infer)
            # Warmup is handled inside BMG itself, so zero the count before
            # the samples are post-processed below.
            default_config = InferConfig()
            default_config.num_warmup = num_adaptive_samples
            num_adaptive_samples = 0
            try:
                # 5123401 is a fixed RNG seed -- presumably for
                # reproducibility; TODO confirm.
                raw = g.infer(num_samples, inference_type, 5123401, num_chains, default_config)
            except RuntimeError as e:
                raise RuntimeError(('Error during BMG inference\n' + 'Note: the runtime error from BMG may not be interpretable.\n')) from e
            self._finish(prof.graph_infer)
            if produce_report:
                self._begin(prof.deserialize_perf_report)
                js = g.performance_report()
                report = pr.json_to_perf_report(js)
                self._finish(prof.deserialize_perf_report)
            assert (len(raw) == num_chains)
            assert all([(len(r) == num_samples) for r in raw])
            samples = [self._transpose_samples(r) for r in raw]
        mcsamples = self._build_mcsamples(rv_to_query(generated_graph.bmg), samples, query_to_query_id, num_samples, num_chains, num_adaptive_samples)
        self._finish(prof.infer)
        if produce_report:
            report.profiler_report = self._pd.to_report()
        return (mcsamples, report)

    def infer(
        self,
        queries: List[RVIdentifier],
        observations: Dict[RVIdentifier, torch.Tensor],
        num_samples: int,
        num_chains: int = 4,
        num_adaptive_samples: int = 0,
        inference_type: InferenceType = InferenceType.NMC,
        skip_optimizations: Set[str] = default_skip_optimizations,
    ) -> MonteCarloSamples:
        """Public entry point: run inference without a performance report."""
        (samples, _) = self._infer(queries, observations, num_samples, num_chains, num_adaptive_samples, inference_type, False, skip_optimizations)
        return samples

    def to_dot(
        self,
        queries: List[RVIdentifier],
        observations: Dict[RVIdentifier, torch.Tensor],
        after_transform: bool = True,
        label_edges: bool = False,
        skip_optimizations: Set[str] = default_skip_optimizations,
    ) -> str:
        """Render the accumulated graph as Graphviz DOT source text."""
        node_types = False
        node_sizes = False
        edge_requirements = False
        bmg = self._accumulate_graph(queries, observations)._bmg
        return to_dot(bmg, node_types, node_sizes, edge_requirements, after_transform, label_edges, skip_optimizations)

    def _to_mini(self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], indent=None) -> str:
        """Render the accumulated graph in the 'mini' text format."""
        bmg = self._accumulate_graph(queries, observations)._bmg
        return to_mini(bmg, indent)

    def to_graphviz(
        self,
        queries: List[RVIdentifier],
        observations: Dict[RVIdentifier, torch.Tensor],
        after_transform: bool = True,
        label_edges: bool = False,
        skip_optimizations: Set[str] = default_skip_optimizations,
    ) -> graphviz.Source:
        """Render the accumulated graph as a graphviz.Source object."""
        s = self.to_dot(queries, observations, after_transform, label_edges, skip_optimizations)
        return graphviz.Source(s)

    def to_cpp(self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor]) -> str:
        """Emit C++ source code that would construct the BMG graph."""
        bmg = self._accumulate_graph(queries, observations)._bmg
        return to_bmg_cpp(bmg).code

    def to_python(self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor]) -> str:
        """Emit Python source code that would construct the BMG graph."""
        bmg = self._accumulate_graph(queries, observations)._bmg
        return to_bmg_python(bmg).code

    def to_bm_python(self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor]) -> str:
        """Emit Bean Machine Python source equivalent to the graph."""
        bmg = self._accumulate_graph(queries, observations)._bmg
        return to_bm_python(bmg)

    def to_graph(
        self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor]
    ) -> Tuple[Graph, Dict[RVIdentifier, int]]:
        """Build and return the BMG Graph plus a map from each queried
        random variable to its query id in that graph."""
        rt = self._accumulate_graph(queries, observations)
        bmg = rt._bmg
        generated_graph = to_bmg_graph(bmg)
        g = generated_graph.graph
        query_to_query_id = generated_graph.query_to_query_id
        rv_to_query_map = rv_to_query(generated_graph.bmg)
        rv_to_query_id = {rv: query_to_query_id[rv_to_query_map[rv]] for rv in queries}
        return (g, rv_to_query_id)
def test_event_role_delete(db, client, user, jwt):
    """Deleting a users-events-role removes the matching accepted role
    invite, and the user subsequently loses access to the roles listing."""
    uer = UsersEventsRolesSubFactory(user=user)
    RoleInviteSubFactory(status='accepted', event=uer.event, email=user.email, role=uer.role)
    db.session.commit()

    def matching_invites():
        # Count of invites tied to this user/event/role combination.
        return RoleInvite.query.filter_by(email=user.email, event=uer.event, role=uer.role).count()

    assert matching_invites() == 1

    # The role holder can list the event's roles before deletion.
    response = client.get(f'/v1/events/{uer.event_id}/users-events-roles', headers=jwt)
    assert response.status_code == 200

    # Delete the role assignment.
    response = client.delete(f'/v1/users-events-roles/{uer.id}', headers=jwt)
    assert response.status_code == 200
    assert json.loads(response.data) == {'jsonapi': {'version': '1.0'}, 'meta': {'message': 'Object successfully deleted'}}

    # The invite is gone and the listing endpoint now rejects the user.
    assert matching_invites() == 0
    response = client.get(f'/v1/events/{uer.event_id}/users-events-roles', headers=jwt)
    assert response.status_code == 403
class TestElections(ApiBaseTest):
    """Integration tests for the /elections endpoints, driven by factory
    fixtures for a 2012 NY Senate race and a 2020 presidential race."""

    def setUp(self):
        super().setUp()
        # --- Senate fixture: one candidate with history in two two-year
        # periods of the 2012 cycle, plus a principal and an authorized
        # committee. ---
        self.candidate = factories.CandidateDetailFactory()
        self.candidates = [
            factories.CandidateHistoryFactory(candidate_id=self.candidate.candidate_id, state='NY', two_year_period=2012, election_years=[2010, 2012], cycles=[2010, 2012], office='S', candidate_election_year=2012),
            factories.CandidateHistoryFactory(candidate_id=self.candidate.candidate_id, state='NY', two_year_period=2010, election_years=[2010, 2012], cycles=[2010, 2012], office='S', candidate_election_year=2012),
        ]
        self.committees = [
            factories.CommitteeHistoryFactory(cycle=2012, designation='P'),
            factories.CommitteeHistoryFactory(cycle=2012, designation='A'),
        ]
        [factories.CandidateElectionFactory(candidate_id=self.candidate.candidate_id, cand_election_year=year) for year in [2010, 2012]]
        [factories.CommitteeDetailFactory(committee_id=each.committee_id) for each in self.committees]
        db.session.flush()
        # Candidate-committee links; the last link has no
        # election_yr_to_be_included and so falls outside the election.
        self.candidate_committee_links = [
            factories.CandidateCommitteeLinkFactory(candidate_id=self.candidate.candidate_id, committee_id=self.committees[0].committee_id, committee_designation='A', fec_election_year=2012, election_yr_to_be_included=2012),
            factories.CandidateCommitteeLinkFactory(candidate_id=self.candidate.candidate_id, committee_id=self.committees[1].committee_id, committee_designation='P', fec_election_year=2012, election_yr_to_be_included=2012),
            factories.CandidateCommitteeLinkFactory(candidate_id=self.candidate.candidate_id, committee_id=self.committees[1].committee_id, committee_designation='P', fec_election_year=2010, election_yr_to_be_included=2012),
            factories.CandidateCommitteeLinkFactory(candidate_id=self.candidate.candidate_id, committee_id=self.committees[1].committee_id, committee_designation='P', fec_election_year=2010),
        ]
        # Per-committee totals; the first two are the 2012-cycle totals the
        # non-full election views aggregate over.
        self.totals = [
            factories.TotalsHouseSenateFactory(receipts=50, disbursements=75, committee_id=self.committees[0].committee_id, coverage_end_date=datetime.datetime(2012, 9, 30), last_cash_on_hand_end_period=100, cycle=2012),
            factories.TotalsHouseSenateFactory(receipts=50, disbursements=75, committee_id=self.committees[1].committee_id, coverage_end_date=datetime.datetime(2012, 12, 31), last_cash_on_hand_end_period=100, cycle=2012),
            factories.TotalsHouseSenateFactory(receipts=50, disbursements=75, committee_id=self.committees[1].committee_id, coverage_end_date=datetime.datetime(2012, 12, 31), last_cash_on_hand_end_period=300, cycle=2010),
        ]
        # --- Presidential fixture: 2020 candidate with a principal and a
        # joint-fundraising committee. ---
        self.president_candidate = factories.CandidateDetailFactory()
        self.president_candidates = [
            factories.CandidateHistoryFactory(candidate_id=self.president_candidate.candidate_id, state='NY', two_year_period=2020, office='P', candidate_inactive=False, candidate_election_year=2020),
            factories.CandidateHistoryFactory(candidate_id=self.president_candidate.candidate_id, state='NY', two_year_period=2018, office='P', candidate_inactive=False, candidate_election_year=2020),
        ]
        self.president_committees = [
            factories.CommitteeHistoryFactory(cycle=2020, designation='P'),
            factories.CommitteeHistoryFactory(cycle=2020, designation='J'),
        ]
        [factories.CandidateElectionFactory(candidate_id=self.president_candidate.candidate_id, cand_election_year=year) for year in [2016, 2020]]
        [factories.CommitteeDetailFactory(committee_id=each.committee_id) for each in self.president_committees]
        db.session.flush()
        self.president_candidate_committee_links = [
            factories.CandidateCommitteeLinkFactory(candidate_id=self.president_candidate.candidate_id, committee_id=self.president_committees[0].committee_id, committee_designation='P', fec_election_year=2020, cand_election_year=2020, election_yr_to_be_included=2020),
            factories.CandidateCommitteeLinkFactory(candidate_id=self.president_candidate.candidate_id, committee_id=self.president_committees[0].committee_id, committee_designation='P', fec_election_year=2018, cand_election_year=2020, election_yr_to_be_included=2020),
            factories.CandidateCommitteeLinkFactory(candidate_id=self.president_candidate.candidate_id, committee_id=self.president_committees[1].committee_id, committee_designation='P', fec_election_year=2018, cand_election_year=2020, election_yr_to_be_included=2020),
        ]
        self.presidential_totals = [
            factories.TotalsCombinedFactory(receipts=50, disbursements=75, committee_id=self.president_committees[0].committee_id, coverage_end_date=datetime.datetime(2019, 9, 30), last_cash_on_hand_end_period=0, cycle=2020),
            factories.TotalsCombinedFactory(receipts=1, disbursements=1, committee_id=self.president_committees[1].committee_id, coverage_end_date=datetime.datetime(2017, 12, 31), last_cash_on_hand_end_period=100, cycle=2018),
            factories.TotalsCombinedFactory(receipts=25, disbursements=10, committee_id=self.president_committees[0].committee_id, coverage_end_date=datetime.datetime(2017, 12, 31), last_cash_on_hand_end_period=300, cycle=2018),
        ]

    def test_missing_params(self):
        # No office/cycle at all -> validation error.
        response = self.app.get(api.url_for(ElectionView))
        self.assertEqual(response.status_code, 422)

    def test_conditional_missing_params(self):
        # President needs only cycle; senate additionally needs state;
        # house additionally needs state and district.
        response = self.app.get(api.url_for(ElectionView, office='president', cycle=2012))
        self.assertEqual(response.status_code, 200)
        response = self.app.get(api.url_for(ElectionView, office='senate', cycle=2012))
        self.assertEqual(response.status_code, 422)
        response = self.app.get(api.url_for(ElectionView, office='senate', cycle=2012, state='NY'))
        self.assertEqual(response.status_code, 200)
        response = self.app.get(api.url_for(ElectionView, office='house', cycle=2012, state='NY'))
        self.assertEqual(response.status_code, 422)
        response = self.app.get(api.url_for(ElectionView, office='house', cycle=2012, state='NY', district='01'))
        self.assertEqual(response.status_code, 200)

    def test_empty_query(self):
        # A state with no fixture data returns an empty result set.
        results = self._results(api.url_for(ElectionView, office='senate', cycle=2012, state='ZZ'))
        assert (len(results) == 0)

    def test_elections(self):
        # Non-full election: totals restricted to the 2012 cycle only.
        results = self._results(api.url_for(ElectionView, office='senate', cycle=2012, state='NY', election_full=False))
        self.assertEqual(len(results), 1)
        totals = [each for each in self.totals if (each.cycle == 2012)]
        expected = {'candidate_id': self.candidate.candidate_id, 'candidate_name': self.candidate.name, 'incumbent_challenge_full': self.candidate.incumbent_challenge_full, 'party_full': self.candidate.party_full, 'total_receipts': sum((each.receipts for each in totals)), 'total_disbursements': sum((each.disbursements for each in totals)), 'cash_on_hand_end_period': sum((each.last_cash_on_hand_end_period for each in totals))}
        assert_dicts_subset(results[0], expected)
        assert (set((each.committee_id for each in self.committees)) == set(results[0]['committee_ids']))

    def test_elections_full(self):
        # Full election: all cycles' totals, but cash-on-hand only from the
        # final (2012) cycle's rows.
        results = self._results(api.url_for(ElectionView, office='senate', cycle=2012, state='NY', election_full='true'))
        totals = self.totals
        cash_on_hand_totals = self.totals[:2]
        expected = {'candidate_id': self.candidate.candidate_id, 'candidate_name': self.candidate.name, 'incumbent_challenge_full': self.candidate.incumbent_challenge_full, 'party_full': self.candidate.party_full, 'total_receipts': sum((each.receipts for each in totals)), 'total_disbursements': sum((each.disbursements for each in totals)), 'cash_on_hand_end_period': sum((each.last_cash_on_hand_end_period for each in cash_on_hand_totals))}
        assert (len(results) == 1)
        assert_dicts_subset(results[0], expected)
        assert (set(results[0]['committee_ids']) == set((each.committee_id for each in self.committees if (each.designation != 'J'))))

    def test_elections_year_null(self):
        # 2010 is not the candidate's election year, so nothing is returned.
        # NOTE(review): totals/cash_on_hand_totals are unused here.
        results = self._results(api.url_for(ElectionView, office='senate', cycle=2010, state='NY', election_full='true'))
        totals = self.totals
        cash_on_hand_totals = self.totals[:2]
        assert (len(results) == 0)

    def test_president_elections_full(self):
        results = self._results(api.url_for(ElectionView, office='president', cycle=2020, election_full='true'))
        totals = self.presidential_totals
        cash_on_hand_totals = self.presidential_totals[:2]
        expected = {'candidate_id': self.president_candidate.candidate_id, 'candidate_name': self.president_candidate.name, 'incumbent_challenge_full': self.president_candidate.incumbent_challenge_full, 'party_full': self.president_candidate.party_full, 'total_receipts': sum((each.receipts for each in totals)), 'total_disbursements': sum((each.disbursements for each in totals)), 'cash_on_hand_end_period': sum((each.last_cash_on_hand_end_period for each in cash_on_hand_totals))}
        assert (len(results) == 1)
        assert_dicts_subset(results[0], expected)

    def test_electionview_excludes_jfc(self):
        # Re-designate the first committee as joint-fundraising: its totals
        # must drop out of the aggregates and the committee id list.
        self.candidate_committee_links[0].committee_designation = 'J'
        self.committees[0].designation = 'J'
        results = self._results(api.url_for(ElectionView, office='senate', cycle=2012, state='NY', election_full='true'))
        totals_without_jfc = self.totals[1:]
        cash_on_hand_without_jfc = self.totals[1:2]
        expected = {'candidate_id': self.candidate.candidate_id, 'candidate_name': self.candidate.name, 'incumbent_challenge_full': self.candidate.incumbent_challenge_full, 'party_full': self.candidate.party_full, 'total_receipts': sum((each.receipts for each in totals_without_jfc)), 'total_disbursements': sum((each.disbursements for each in totals_without_jfc)), 'cash_on_hand_end_period': sum((each.last_cash_on_hand_end_period for each in cash_on_hand_without_jfc))}
        assert (len(results) == 1)
        assert_dicts_subset(results[0], expected)
        assert (set(results[0]['committee_ids']) == set((each.committee_id for each in self.committees if (each.designation != 'J'))))

    def test_election_summary(self):
        results = self._response(api.url_for(ElectionSummary, office='senate', cycle=2012, state='NY', election_full=False))
        totals = [each for each in self.totals if (each.cycle == 2012)]
        self.assertEqual(results['count'], 1)
        self.assertEqual(results['receipts'], sum((each.receipts for each in totals)))
        self.assertEqual(results['disbursements'], sum((each.disbursements for each in totals)))
class DEXLDAPAuthenticator(AbstractAuthenticator):
    """Authenticates against a Kubeflow endpoint secured by DEX with its
    LDAP connector and returns the resulting session cookies."""

    _type = SupportedAuthProviders.DEX_LDAP

    def authenticate(self, kf_endpoint: str, runtime_config_name: str,
                     username: str = None, password: str = None) -> Optional[str]:
        """Log in to DEX/LDAP with *username*/*password*.

        Returns the session cookies as a '; '-joined "name=value" string.
        Raises AuthenticationError on any detection, redirect, or
        credential failure; each error carries the request history for
        diagnostics.
        """
        if (_empty_or_whitespaces_only(username) or _empty_or_whitespaces_only(password)):
            raise AuthenticationError(
                f"Credentials are required to perform this type of authentication. Update runtime configuration '{runtime_config_name}' and try again.",
                provider=self._type,
            )
        with requests.Session() as s:
            request_history = []
            # Probe the endpoint; a secured server redirects to DEX.
            resp = s.get(kf_endpoint, allow_redirects=True)
            request_history.append((kf_endpoint, resp))
            if (resp.status_code != HTTPStatus.OK):
                # BUGFIX: the status code and the follow-up instruction ran
                # together in the original message; add the missing separator.
                raise AuthenticationError(
                    f"Error detecting whether Kubeflow server at {kf_endpoint} is secured: HTTP status code {resp.status_code}. Update runtime configuration '{runtime_config_name}' and try again.",
                    provider=self._type,
                    request_history=request_history,
                )
            if (len(resp.history) == 0):
                # No redirect happened -> the endpoint is not DEX-secured.
                raise AuthenticationError(
                    f"The Kubeflow server at {kf_endpoint} is not secured using DEX with LDAP. Update the authentication type in runtime configuration '{runtime_config_name}' and try again.",
                    provider=self._type,
                    request_history=request_history,
                )
            redirect_url_obj = urlsplit(resp.url)
            if re.search('/auth$', redirect_url_obj.path):
                # Connector-selection page: steer directly to the LDAP connector.
                redirect_url_obj = redirect_url_obj._replace(path=re.sub('/auth$', '/auth/ldap', redirect_url_obj.path))
            else:
                # Already on a connector path; verify it is the LDAP one.
                m = re.search('/auth/([^/]*)/?', redirect_url_obj.path)
                if (m and (m.group(1) != 'ldap')):
                    raise AuthenticationError(
                        f"The Kubeflow server at {kf_endpoint} redirected to an unexpected HTTP path ('{redirect_url_obj.path}'). Verify that Kubeflow is configured for '{self._type.name}' and, if necessary, update the authentication type in runtime configuration '{runtime_config_name}'.",
                        provider=self._type,
                        request_history=request_history,
                    )
            if re.search('/auth/ldap/login$', redirect_url_obj.path):
                # Already at the login form.
                dex_login_url = redirect_url_obj.geturl()
            else:
                # Follow redirects until the login form is reached.
                resp = s.get(redirect_url_obj.geturl(), allow_redirects=True)
                request_history.append((redirect_url_obj.geturl(), resp))
                if (resp.status_code != HTTPStatus.OK):
                    raise AuthenticationError(
                        f'Error redirecting to the DEX LDAP login page: HTTP status code {resp.status_code}.',
                        provider=self._type,
                        request_history=request_history,
                    )
                dex_login_url = resp.url
            # Submit the credentials; a successful login redirects away
            # from the form, so an empty history means rejection.
            resp = s.post(dex_login_url, data={'login': username, 'password': password}, allow_redirects=True)
            request_history.append((dex_login_url, resp))
            if (len(resp.history) == 0):
                raise AuthenticationError(
                    f"The DEX LDAP credentials are probably invalid. Update runtime configuration '{runtime_config_name}' and try again.",
                    provider=self._type,
                    request_history=request_history,
                )
            return '; '.join([f'{c.name}={c.value}' for c in s.cookies])
def test_disable_tuple_notation_option():
    """With disable_tuple_notation, a ('string', '0') tuple is written as a
    plain list value instead of being treated as (type, value) notation.

    NOTE(review): the original docstring was truncated to a lone quote in
    the source; restored as a proper docstring.
    """
    schema = {'namespace': 'namespace', 'name': 'name', 'type': 'record', 'fields': [{'name': 'foo', 'type': ['string', {'type': 'array', 'items': 'string'}]}]}
    new_record = roundtrip(schema, {'foo': ('string', '0')}, writer_kwargs={'disable_tuple_notation': True})
    assert (new_record == {'foo': ['string', '0']})
class PersistentWebSocket():
    """Async context manager that opens a websocket lazily on first entry
    and keeps it open across uses, reconnecting only after a failure."""

    def __init__(self, endpoint_uri: URI, websocket_kwargs: Any) -> None:
        # No connection yet; established on first __aenter__.
        self.ws: Optional[WebSocketClientProtocol] = None
        self.endpoint_uri = endpoint_uri
        self.websocket_kwargs = websocket_kwargs

    async def __aenter__(self) -> WebSocketClientProtocol:
        # Connect on first use; subsequent entries reuse the open socket.
        if (self.ws is None):
            self.ws = (await connect(uri=self.endpoint_uri, **self.websocket_kwargs))
        return self.ws

    async def __aexit__(self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb: TracebackType) -> None:
        # Keep the connection open on a clean exit. Only when the body
        # raised do we close and clear it so the next entry reconnects.
        # (The flattened source left the placement of `self.ws = None`
        # ambiguous; resetting unconditionally would defeat persistence,
        # so it is restored inside the exception branch.)
        if (exc_val is not None):
            try:
                (await self.ws.close())
            except Exception:
                # Best effort: the socket may already be dead.
                pass
            self.ws = None
def process_crosslinks(crystallized_state, crosslinks):
    """Tally crosslink votes per shard, apply validator rewards/penalties,
    and produce updated crosslink records.

    Returns (deltas, new_crosslink_records) where deltas[v] is the balance
    change for the validator with global index v.
    """
    # For each shard, keep the (block_hash, vote_count, bitmask) of the
    # attestation with the most votes.
    main_crosslink = {}
    for c in crosslinks:
        vote_count = 0
        mask = bytearray(c.voter_bitmask)
        for byte in mask:
            for j in range(8):
                vote_count += ((byte >> j) % 2)
        if (vote_count > main_crosslink.get(c.shard_id, (b'', 0, b''))[1]):
            main_crosslink[c.shard_id] = (c.shard_block_hash, vote_count, mask)
    new_crosslink_records = [x for x in crystallized_state.crosslink_records]
    # One delta per active validator, indexed by global validator index.
    deltas = ([0] * len(crystallized_state.active_validators))
    for shard in range(SHARD_COUNT):
        indices = get_shard_attesters(crystallized_state, shard)
        (h, votes, mask) = main_crosslink.get(shard, (b'', 0, bytearray(((len(indices) + 7) // 8))))
        crosslink_distance = (crystallized_state.current_epoch - crystallized_state.crosslink_records[shard].epoch)
        # Recent crosslinks earn a reward; stale ones escalate the penalty.
        online_reward = (3 if (crosslink_distance <= 2) else 0)
        offline_penalty = (crosslink_distance * 2)
        for (i, index) in enumerate(indices):
            # The bitmask is positional within this shard's committee (i),
            # but deltas are per global validator index.
            # BUGFIX: the original wrote `deltas[i]`, piling every shard's
            # rewards onto the first len(indices) validators instead of the
            # committee members themselves.
            if (mask[(i // 8)] & (1 << (i % 8))):
                deltas[index] += online_reward
            else:
                deltas[index] -= offline_penalty
        print(('Shard %d: most recent crosslink %d, reward: (%d, %d), votes: %d of %d (%.2f%%)' % (shard, crystallized_state.crosslink_records[shard].epoch, online_reward, (- offline_penalty), votes, len(indices), ((votes * 100) / len(indices)))))
        # A two-thirds majority locks in a new crosslink for the shard.
        if ((votes * 3) >= (len(indices) * 2)):
            new_crosslink_records[shard] = CrosslinkRecord(hash=h, epoch=crystallized_state.current_epoch)
            print(('New crosslink %s' % hex(int.from_bytes(h, 'big'))))
    print(('Total deposit change from crosslinks: %d' % sum(deltas)))
    return (deltas, new_crosslink_records)
_os(*metadata.platforms)
def main():
    """Red-team simulation: loads a renamed copy of user32.dll (masqueraded
    as taskschd.dll) from a fake winword.exe, then creates a scheduled-task
    XML file from a fake svchost.exe, and cleans up afterwards.

    NOTE(review): PS1_FILE, RENAMER, EXE_FILE and `common` are provided
    elsewhere in this module.
    """
    # Staged artifact paths in world-writable / system locations.
    winword = 'C:\\Users\\Public\\winword.exe'
    svchost = 'C:\\Users\\Public\\svchost.exe'
    user32 = 'C:\\Windows\\System32\\user32.dll'
    dll = 'C:\\Users\\Public\\taskschd.dll'
    ps1 = 'C:\\Users\\Public\\Invoke-ImageLoad.ps1'
    rcedit = 'C:\\Users\\Public\\rcedit.exe'
    task = 'C:\\Windows\\System32\\Tasks\\a.xml'
    # Stage: copy a real system DLL and the helper tooling into place.
    common.copy_file(user32, dll)
    common.copy_file(PS1_FILE, ps1)
    common.copy_file(RENAMER, rcedit)
    common.copy_file(EXE_FILE, winword)
    common.copy_file(EXE_FILE, svchost)
    # Rewrite the copied DLL's version resource so it claims to be taskschd.dll.
    common.log('Modifying the OriginalFileName')
    common.execute([rcedit, dll, '--set-version-string', 'OriginalFilename', 'taskschd.dll'])
    # Load the masqueraded DLL from the fake winword process.
    common.log('Loading taskschd.dll')
    common.execute([winword, '-c', f'Import-Module {ps1}; Invoke-ImageLoad {dll}'], timeout=10)
    # Create the scheduled-task XML from the fake svchost process.
    common.execute([svchost, '-c', f'New-Item -Path {task} -Type File'], timeout=10)
    # Remove every staged artifact.
    common.remove_files(dll, ps1, rcedit, task, winword, svchost)
class C3():
    """Transforms row dictionaries into a C3.js-style chart payload with
    'labels', 'datasets' and 'series' keys."""

    @staticmethod
    def y(data: List[dict], y_columns: List[str], x_axis: str, options: dict = None) -> dict:
        """Pivot *data* into chart form.

        With options['agg'] == 'distinct', one label per row and values are
        taken verbatim; otherwise each y column is summed per distinct
        x_axis value. options['sorted'] sorts the labels.

        NOTE(review): declared @staticmethod -- the original had no `self`
        parameter, so calling it on an instance would have mis-bound `data`
        to the instance; static dispatch keeps `C3.y(...)` working and
        fixes `C3().y(...)`.
        """
        is_data = {'labels': OrderedSet(), 'datasets': [], 'series': [], 'python': True}
        if ((data is None) or (y_columns is None)):
            return is_data
        if ((options is not None) and (options.get('agg') == 'distinct')):
            # 'distinct': one label per input row, values copied as-is.
            for y in y_columns:
                is_data['datasets'].append([])
                is_data['series'].append(y)
            is_data['labels'] = []
            for rec in data:
                is_data['labels'].append(rec[x_axis])
                for (i, y) in enumerate(y_columns):
                    is_data['datasets'][i].append(rec.get(y))
        else:
            # Aggregate: sum each y column per distinct x value.
            agg_data = {}
            for rec in data:
                for y in y_columns:
                    if ((y in rec) and (rec[y] is not None)):
                        agg_data.setdefault(y, {})[rec[x_axis]] = (agg_data.get(y, {}).get(rec[x_axis], 0) + float(rec[y]))
            # Collect the union of x values seen across all y columns.
            for c in y_columns:
                for (x, y) in agg_data.get(c, {}).items():
                    is_data['labels'].add(x)
            if ((options is not None) and options.get('sorted', False)):
                is_data['labels'] = sorted(is_data['labels'])
                for (i, y) in enumerate(y_columns):
                    # Missing x values yield None gaps in the dataset.
                    is_data['datasets'].append([agg_data.get(y, {}).get(x) for x in sorted(is_data['labels'])])
                    is_data['series'].append(y)
            else:
                for (i, y) in enumerate(y_columns):
                    is_data['datasets'].append([agg_data.get(y, {}).get(x) for x in is_data['labels']])
                    is_data['series'].append(y)
        return is_data
def _join_tokens_list(tokens: List[bytes]) -> str:
    """Reassemble byte chunks that were split on '%' signs.

    The first chunk is literal text. For every later chunk, the leading two
    bytes are interpreted as a percent-escape via _HEX_TO_BYTE; if they are
    not a valid escape, the '%' is restored verbatim. The joined bytes are
    decoded as UTF-8 with replacement for invalid sequences.
    """
    # First chunk (if any) passes through untouched.
    pieces = tokens[:1]
    for chunk in tokens[1:]:
        escape_candidate = chunk[:2]
        try:
            pieces.append(_HEX_TO_BYTE[escape_candidate] + chunk[2:])
        except KeyError:
            # Not a recognized hex escape -- keep the original '%'.
            pieces.append(b'%' + chunk)
    return b''.join(pieces).decode('utf-8', 'replace')
def remove_role(moderator: ModeratorModel, role: str):
    """Remove *role* from the given moderator's role list.

    Raises ArgumentError if the moderator does not currently hold the role;
    role validity is checked first via _check_roles.
    """
    _check_roles([role])
    with session() as db_session:
        orm_moderator = db_session.query(ModeratorOrmModel).filter_by(id=moderator.id).one()
        if (role not in orm_moderator.roles):
            raise ArgumentError('Role not added')
        orm_moderator.roles.remove(role)
        db_session.commit()
def extractMentallycrippledtranslationsBlogspotCom(item):
    """Parse a feed item from mentallycrippledtranslations.blogspot.com.

    Returns a release message for recognized series tags, None for preview
    posts or items without chapter/volume info, and False when no tag
    matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # Tag -> (series name, translation type); insertion order preserves the
    # original first-match priority.
    releases = {
        'Summoner of Miracles': ('Summoner of Miracles', 'translated'),
        'One Punch of Justice': ('One Punch of Justice', 'translated'),
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in releases.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
.feature('unit')
.story('tasks', 'purge')
class TestPurge():
    def test_init(self):
        """Purge() constructs cleanly with its FledgeProcess base, logger
        and AuditLogger patched out, and wires up an AuditLogger."""
        mock_storage_client_async = MagicMock(spec=StorageClientAsync)
        mock_audit_logger = AuditLogger(mock_storage_client_async)
        with patch.object(FledgeProcess, '__init__') as mock_process:
            with patch.object(FLCoreLogger, 'get_logger') as log:
                with patch.object(mock_audit_logger, '__init__', return_value=None):
                    p = Purge()
                    assert isinstance(p, Purge)
                    # The task must create its own audit logger.
                    assert isinstance(p._audit, AuditLogger)
                    log.assert_called_once_with('Data Purge')
                    mock_process.assert_called_once_with()
    async def test_write_statistics(self):
        """write_statistics updates the PURGED and UNSNPURGED statistics
        counters with the given values."""
        mock_storage_client_async = MagicMock(spec=StorageClientAsync)
        mock_audit_logger = AuditLogger(mock_storage_client_async)
        # Python 3.8+ awaits the coroutine directly; older versions need an
        # ensure_future wrapper for the mocked return value.
        _rv = ((await mock_value('')) if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)) else asyncio.ensure_future(mock_value('')))
        with patch.object(FledgeProcess, '__init__'):
            with patch.object(Statistics, '_load_keys', return_value=_rv):
                with patch.object(Statistics, 'update', return_value=_rv) as mock_stats_update:
                    with patch.object(mock_audit_logger, '__init__', return_value=None):
                        p = Purge()
                        p._storage_async = mock_storage_client_async
                        (await p.write_statistics(1, 2))
                        mock_stats_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
    async def test_set_configuration(self):
        """set_configuration creates the PURGE_READ category under the
        Utilities parent and fetches all of its items."""
        mock_storage_client_async = MagicMock(spec=StorageClientAsync)
        mock_audit_logger = AuditLogger(mock_storage_client_async)
        # Python 3.8+ awaits the coroutine directly; older versions need an
        # ensure_future wrapper for the mocked return value.
        _rv = ((await mock_value('')) if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)) else asyncio.ensure_future(mock_value('')))
        with patch.object(FledgeProcess, '__init__'):
            with patch.object(mock_audit_logger, '__init__', return_value=None):
                p = Purge()
                p._storage = MagicMock(spec=StorageClientAsync)
                mock_cm = ConfigurationManager(p._storage)
                with patch.object(mock_cm, 'create_category', return_value=_rv) as mock_create_cat:
                    with patch.object(mock_cm, 'create_child_category', return_value=_rv) as mock_create_child_cat:
                        with patch.object(mock_cm, 'get_category_all_items', return_value=_rv) as mock_get_cat:
                            (await p.set_configuration())
                            mock_get_cat.assert_called_once_with('PURGE_READ')
                            mock_create_child_cat.assert_called_once_with('Utilities', ['PURGE_READ'])
                            # Inspect the positional args of create_category:
                            # (name, config, description, keep_original_items).
                            (args, _) = mock_create_cat.call_args
                            assert (4 == len(args))
                            assert (5 == len(args[1].keys()))
                            assert ('PURGE_READ' == args[0])
                            assert ('Purge the readings, log, statistics history table' == args[2])
                            assert (args[3] is True)
()
async def store_purge(self, **kwargs):
if ((kwargs.get('age') == '-1') or (kwargs.get('size') == '-1')):
raise StorageServerError(400, 'Bla', 'Some Error')
return {'readings': 10, 'removed': 1, 'unsentPurged': 2, 'unsentRetained': 7, 'duration': 100, 'method': 'mock'}
config = {'purgeAgeSize': {'retainUnsent': {'value': 'purge unsent'}, 'age': {'value': '72'}, 'size': {'value': '20'}}, 'purgeAge': {'retainUnsent': {'value': 'purge unsent'}, 'age': {'value': '72'}, 'size': {'value': '0'}}, 'purgeSize': {'retainUnsent': {'value': 'purge unsent'}, 'age': {'value': '0'}, 'size': {'value': '100'}}, 'retainAgeSize': {'retainUnsent': {'value': 'retain unsent to all destinations'}, 'age': {'value': '72'}, 'size': {'value': '20'}}, 'retainAge': {'retainUnsent': {'value': 'retain unsent to all destinations'}, 'age': {'value': '72'}, 'size': {'value': '0'}}, 'retainSize': {'retainUnsent': {'value': 'retain unsent to all destinations'}, 'age': {'value': '0'}, 'size': {'value': '100'}}, 'retainSizeAny': {'retainUnsent': {'value': 'retain unsent to any destination'}, 'age': {'value': '0'}, 'size': {'value': '100'}}}
.parametrize('conf, expected_return, expected_calls', [(config['purgeAgeSize'], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'purge'}), (config['purgeAge'], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'purge'}), (config['purgeSize'], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'purge'}), (config['retainAgeSize'], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'retainall'}), (config['retainAge'], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'retainall'}), (config['retainSize'], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retainall'}), (config['retainSizeAny'], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retainany'})])
async def test_purge_data(self, conf, expected_return, expected_calls):
mock_storage_client_async = MagicMock(spec=StorageClientAsync)
mock_audit_logger = AuditLogger(mock_storage_client_async)
mock_stream_result = q_result('streams')
payload = {'aggregate': {'operation': 'min', 'column': 'last_object'}}
if (expected_calls['flag'] == 'retainany'):
mock_stream_result = q_result('streams', 'any')
payload = {'aggregate': {'operation': 'max', 'column': 'last_object'}}
if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
_rv1 = (await mock_stream_result)
_rv2 = (await mock_value(''))
else:
_rv1 = asyncio.ensure_future(mock_stream_result)
_rv2 = asyncio.ensure_future(mock_value(''))
with patch.object(FledgeProcess, '__init__'):
with patch.object(mock_audit_logger, '__init__', return_value=None):
p = Purge()
p._logger = FLCoreLogger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._logger.debug = MagicMock()
p._storage_async = MagicMock(spec=StorageClientAsync)
p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
audit = p._audit
with patch.object(p._storage_async, 'query_tbl_with_payload', return_value=_rv1) as patch_storage:
with patch.object(p._readings_storage_async, 'purge', side_effect=self.store_purge) as mock_storage_purge:
with patch.object(audit, 'information', return_value=_rv2) as audit_info:
t_expected_return = (await p.purge_data(conf))
assert (expected_return == (await p.purge_data(conf)))
assert audit_info.called
(_, kwargs) = mock_storage_purge.call_args
assert (kwargs == expected_calls)
assert patch_storage.called
assert (4 == patch_storage.call_count)
(args, _) = patch_storage.call_args
assert ('streams' == args[0])
assert (payload == json.loads(args[1]))
@pytest.mark.parametrize('conf, expected_return', [
    ({'retainUnsent': {'value': 'purge unsent'}, 'age': {'value': '0'}, 'size': {'value': '0'}}, (0, 0)),
    ({'retainUnsent': {'value': 'retain unsent to all destinations'}, 'age': {'value': '0'}, 'size': {'value': '0'}}, (0, 0)),
])
async def test_purge_data_no_data_purged(self, conf, expected_return):
    """With zero age and size limits nothing is purged and 'No rows purged'
    is logged exactly once.

    Fix: the `@pytest.mark.parametrize` decorator had been mangled into a
    bare `.parametrize(...)` expression, so the test never received its
    parameters; restored it.
    """
    mock_storage_client_async = MagicMock(spec=StorageClientAsync)
    mock_audit_logger = AuditLogger(mock_storage_client_async)
    # Py>=3.8 awaits the mock coroutines directly; older versions need Futures.
    if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
        _rv1 = (await q_result('streams'))
        _rv2 = (await mock_value(''))
    else:
        _rv1 = asyncio.ensure_future(q_result('streams'))
        _rv2 = asyncio.ensure_future(mock_value(''))
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mock_audit_logger, '__init__', return_value=None):
            p = Purge()
            p._logger = FLCoreLogger
            p._logger.info = MagicMock()
            p._logger.error = MagicMock()
            p._storage_async = MagicMock(spec=StorageClientAsync)
            p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
            audit = p._audit
            with patch.object(p._storage_async, 'query_tbl_with_payload', return_value=_rv1) as patch_storage:
                with patch.object(p._readings_storage_async, 'purge', side_effect=self.store_purge):
                    with patch.object(audit, 'information', return_value=_rv2):
                        assert (expected_return == (await p.purge_data(conf)))
                        p._logger.info.assert_called_once_with('No rows purged')
            assert patch_storage.called
            assert (2 == patch_storage.call_count)
@pytest.mark.parametrize('conf, expected_return', [
    ({'retainUnsent': {'value': 'retain unsent to all destinations'}, 'age': {'value': '-1'}, 'size': {'value': '-1'}}, (0, 0)),
])
async def test_purge_error_storage_response(self, conf, expected_return):
    """A failing storage purge response must surface as (0, 0) rows purged.

    Fix: restored the `@pytest.mark.parametrize` decorator that had been
    mangled into a bare `.parametrize(...)` expression.
    """
    mock_storage_client_async = MagicMock(spec=StorageClientAsync)
    mock_audit_logger = AuditLogger(mock_storage_client_async)
    # Py>=3.8 awaits the mock coroutines directly; older versions need Futures.
    if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
        _rv1 = (await q_result('streams'))
        _rv2 = (await mock_value(''))
    else:
        _rv1 = asyncio.ensure_future(q_result('streams'))
        _rv2 = asyncio.ensure_future(mock_value(''))
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mock_audit_logger, '__init__', return_value=None):
            p = Purge()
            p._logger = FLCoreLogger
            p._logger.info = MagicMock()
            p._logger.error = MagicMock()
            p._storage_async = MagicMock(spec=StorageClientAsync)
            p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
            audit = p._audit
            with patch.object(p._storage_async, 'query_tbl_with_payload', return_value=_rv1) as patch_storage:
                with patch.object(p._readings_storage_async, 'purge', side_effect=self.store_purge):
                    with patch.object(audit, 'information', return_value=_rv2):
                        assert (expected_return == (await p.purge_data(conf)))
            assert patch_storage.called
            assert (2 == patch_storage.call_count)
@pytest.mark.parametrize('conf, expected_error_key', [
    ({'retainUnsent': {'value': 'retain unsent to all destinations'}, 'age': {'value': 'bla'}, 'size': {'value': '0'}}, 'age'),
    ({'retainUnsent': {'value': 'retain unsent to all destinations'}, 'age': {'value': '0'}, 'size': {'value': 'bla'}}, 'size'),
])
async def test_purge_data_invalid_conf(self, conf, expected_error_key):
    """A non-integer 'age'/'size' config value must be logged as an error.

    Fix: restored the `@pytest.mark.parametrize` decorator that had been
    mangled into a bare `.parametrize(...)` expression.
    """
    mock_storage_client_async = MagicMock(spec=StorageClientAsync)
    mock_audit_logger = AuditLogger(mock_storage_client_async)
    expected_error_message = 'purge_data - Configuration item {} bla should be integer!'.format(expected_error_key)
    # Py>=3.8 awaits the mock coroutines directly; older versions need Futures.
    if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
        _rv1 = (await q_result('streams'))
        _rv2 = (await mock_value(''))
    else:
        _rv1 = asyncio.ensure_future(q_result('streams'))
        _rv2 = asyncio.ensure_future(mock_value(''))
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mock_audit_logger, '__init__', return_value=None):
            p = Purge()
            p._logger = FLCoreLogger
            p._logger.info = MagicMock()
            p._logger.error = MagicMock()
            p._storage_async = MagicMock(spec=StorageClientAsync)
            p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
            audit = p._audit
            with patch.object(p._storage_async, 'query_tbl_with_payload', return_value=_rv1) as patch_storage:
                with patch.object(p._readings_storage_async, 'purge', side_effect=self.store_purge):
                    with patch.object(audit, 'information', return_value=_rv2):
                        (await p.purge_data(conf))
                        p._logger.error.assert_called_with(expected_error_message)
            assert patch_storage.called
            assert (2 == patch_storage.call_count)
async def test_run(self):
    """run() drives the whole pipeline off one configuration object:
    set_configuration -> purge_data -> write_statistics, plus the
    statistics-history and audit-trail purges."""
    mock_storage_client_async = MagicMock(spec=StorageClientAsync)
    mock_audit_logger = AuditLogger(mock_storage_client_async)
    # Py>=3.8 awaits the mock coroutines directly; older versions need Futures.
    if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
        _rv1 = (await mock_value('Some config'))
        _rv2 = (await mock_value((1, 2)))
        _rv3 = (await mock_value(None))
    else:
        _rv1 = asyncio.ensure_future(mock_value('Some config'))
        _rv2 = asyncio.ensure_future(mock_value((1, 2)))
        _rv3 = asyncio.ensure_future(mock_value(None))
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mock_audit_logger, '__init__', return_value=None):
            p = Purge()
            p._logger.exception = MagicMock()
            with patch.object(p, 'set_configuration', return_value=_rv1) as mock_set_config:
                with patch.object(p, 'purge_data', return_value=_rv2) as mock_purge_data:
                    with patch.object(p, 'write_statistics', return_value=_rv3) as mock_write_stats:
                        with patch.object(p, 'purge_stats_history', return_value=_rv3) as mock_purge_stats_history:
                            with patch.object(p, 'purge_audit_trail_log', return_value=_rv3) as mock_purge_audit:
                                (await p.run())
                                # purge_data returned (1, 2), which must be
                                # unpacked into write_statistics(1, 2).
                                mock_purge_audit.assert_called_once_with('Some config')
                                mock_purge_stats_history.assert_called_once_with('Some config')
                                mock_write_stats.assert_called_once_with(1, 2)
                                mock_purge_data.assert_called_once_with('Some config')
                                mock_set_config.assert_called_once_with()
async def test_run_exception(self, event_loop):
    """run() must swallow a purge_data failure and log it, not propagate it."""
    async def mock_purge(x):
        # Simulate purge_data failing after a short async delay.
        (await asyncio.sleep(0.1))
        raise Exception('')
    mock_storage_client_async = MagicMock(spec=StorageClientAsync)
    mock_audit_logger = AuditLogger(mock_storage_client_async)
    # Py>=3.8 awaits the mock coroutine directly; older versions need a Future.
    _rv = ((await mock_value('Some config')) if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)) else asyncio.ensure_future(mock_value('Some config')))
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mock_audit_logger, '__init__', return_value=None):
            p = Purge()
            p._logger.exception = MagicMock()
            with patch.object(p, 'set_configuration', return_value=_rv):
                with patch.object(p, 'purge_data', side_effect=mock_purge):
                    with patch.object(p, 'write_statistics'):
                        (await p.run())
                        # Exactly one exception must have been logged.
                        assert (1 == p._logger.exception.call_count)
def test_branch_node():
    """A BranchNode survives a round-trip through its flyte IDL form."""
    node_metadata = _get_sample_node_metadata()
    task_node = _workflow.TaskNode(reference_id=_generic_id)
    binding_five = _literals.BindingData(scalar=_literals.Scalar(primitive=_literals.Primitive(integer=5)))
    binding_ninety_nine = _literals.BindingData(scalar=_literals.Scalar(primitive=_literals.Primitive(integer=99)))
    node_inputs = [
        _literals.Binding(var='myvar', binding=binding_five),
        _literals.Binding(var='myothervar', binding=binding_ninety_nine),
    ]
    obj = _workflow.Node(
        id='some:node:id',
        metadata=node_metadata,
        inputs=node_inputs,
        upstream_node_ids=[],
        output_aliases=[],
        task_node=task_node,
    )

    def five_eq_two():
        # Build a fresh `5 == 2` comparison each time, mirroring the original's
        # repeated inline construction.
        return _condition.ComparisonExpression(
            _condition.ComparisonExpression.Operator.EQ,
            _condition.Operand(primitive=_literals.Primitive(integer=5)),
            _condition.Operand(primitive=_literals.Primitive(integer=2)),
        )

    primary_case = _workflow.IfBlock(
        condition=_condition.BooleanExpression(comparison=five_eq_two()),
        then_node=obj,
    )
    conjunction = _condition.ConjunctionExpression(
        _condition.ConjunctionExpression.LogicalOperator.AND,
        _condition.BooleanExpression(comparison=five_eq_two()),
        _condition.BooleanExpression(comparison=five_eq_two()),
    )
    secondary_case = _workflow.IfBlock(
        condition=_condition.BooleanExpression(conjunction=conjunction),
        then_node=obj,
    )
    bn = _workflow.BranchNode(
        _workflow.IfElseBlock(case=primary_case, other=[secondary_case], else_node=obj)
    )
    bn2 = _workflow.BranchNode.from_flyte_idl(bn.to_flyte_idl())
    assert bn == bn2
    assert bn.if_else.case.then_node == obj
@click.command('reset', cls=FandoghCommand)
@click.option('-s', '--service', '--name', 'name', prompt='Service Name')
def service_reset(name):
    """Restart the named service after an explicit user confirmation.

    Fix: the `@click.command` / `@click.option` decorators had been mangled
    into bare tuple expressions, leaving the CLI entry point unregistered;
    restored them (the file already uses `click.confirm`/`click.echo`).
    """
    _RESTART_SERVICE = 'RESTART'
    warning = format_text(
        'Restarting service may cause downtime, are you sure about this action?',
        TextStyle.WARNING,
    )
    if click.confirm(warning):
        response = request_service_action(name, _RESTART_SERVICE)
        click.echo(response['message'])
class GymConnection(Connection):
    """AEA connection wrapping a single OpenAI ``gym`` environment.

    The environment is either injected directly or resolved (via
    ``pydoc.locate``-style lookup) from the ``env`` key of the connection
    configuration.
    """

    connection_id = PUBLIC_ID

    def __init__(self, gym_env: Optional[gym.Env] = None, **kwargs: Any) -> None:
        """Initialize the connection, instantiating the env if not supplied.

        :param gym_env: optional pre-built gym environment.
        :param kwargs: forwarded to the base ``Connection``.
        :raises ValueError: when no env is given and ``env`` is missing
            from the configuration.
        """
        super().__init__(**kwargs)
        if (gym_env is None):
            gym_env_package = cast(str, self.configuration.config.get('env'))
            if (gym_env_package is None):
                raise ValueError('`env` must be set in configuration!')
            # NOTE(review): locate() returns None for an unresolvable dotted
            # path, which would raise TypeError on the next line — confirm
            # this failure mode is acceptable.
            gym_env_class = locate(gym_env_package)
            gym_env = gym_env_class()
        self.channel = GymChannel(self.address, gym_env)
        # NOTE(review): _connection appears unused within this class; kept
        # presumably for base-class interface compatibility.
        self._connection = None

    async def connect(self) -> None:
        """Connect the underlying channel (no-op when already connected)."""
        if self.is_connected:
            return
        with self._connect_context():
            self.channel.logger = self.logger
            (await self.channel.connect())

    async def disconnect(self) -> None:
        """Disconnect the channel, tracking the connection state transition."""
        if self.is_disconnected:
            return
        self.state = ConnectionStates.disconnecting
        (await self.channel.disconnect())
        self.state = ConnectionStates.disconnected

    async def send(self, envelope: Envelope) -> None:
        """Forward an envelope to the channel; requires a live connection."""
        self._ensure_connected()
        (await self.channel.send(envelope))

    async def receive(self, *args: Any, **kwargs: Any) -> Optional['Envelope']:
        """Await the next envelope from the channel; None if cancelled."""
        self._ensure_connected()
        try:
            envelope = (await self.channel.get())
            return envelope
        except CancelledError:
            return None
def test_change_parents():
    """world.replace() must rewire the dependency graph: changing foo's value
    flips baz's parent from bar(0) to bar(1), and the children sets of both
    bar variables track that change."""
    model = DynamicModel()
    world = World(initialize_fn=(lambda d: torch.zeros_like(d.sample())))
    with world:
        model.baz()
    # Initial graph (foo initialized to zero — see initialize_fn above):
    # baz depends on foo and bar(0), not on bar(1).
    assert (model.foo() in world.get_variable(model.baz()).parents)
    assert (model.bar(0) in world.get_variable(model.baz()).parents)
    assert (model.bar(1) not in world.get_variable(model.baz()).parents)
    assert (model.baz() in world.get_variable(model.bar(0)).children)
    # After replacing foo with 1.0 the dependency must move to bar(1).
    world2 = world.replace({model.foo(): torch.tensor(1.0)})
    assert (model.bar(0) not in world2.get_variable(model.baz()).parents)
    assert (model.bar(1) in world2.get_variable(model.baz()).parents)
    assert (model.baz() in world2.get_variable(model.bar(1)).children)
    assert (model.baz() not in world2.get_variable(model.bar(0)).children)
class Solution():
    """Minimum path sum in a grid (move only right or down)."""

    def minPathSum(self, grid: List[List[int]]) -> int:
        """Return the minimal sum along any top-left -> bottom-right path.

        :param grid: rectangular grid of non-negative costs; empty grids
            yield 0.
        :return: minimum achievable path sum.

        Improvement over the original: uses a rolling 1-D DP row, reducing
        auxiliary space from O(rows*cols) to O(cols) with identical results.
        """
        if (not grid) or (not grid[0]):
            return 0
        cols = len(grid[0])
        # dp[j] holds the min path sum to cell (current row, j).
        dp = [0] * cols
        for i, row in enumerate(grid):
            for j, elem in enumerate(row):
                if i == 0 and j == 0:
                    dp[j] = elem
                elif i == 0:
                    # First row: can only arrive from the left.
                    dp[j] = elem + dp[j - 1]
                elif j == 0:
                    # First column: can only arrive from above (dp[j] still
                    # holds the previous row's value here).
                    dp[j] = elem + dp[j]
                else:
                    # dp[j] = from above, dp[j - 1] = from the left.
                    dp[j] = elem + min(dp[j - 1], dp[j])
        return dp[-1]
class module_file_upload2web():
    """User-facing error messages for the file_upload2web module."""

    # Could not obtain the web root information from the remote side.
    failed_retrieve_info = 'Failed retrieve web root information'
    # The remote path could not be resolved (missing or permission denied).
    failed_resolve_path = 'Failed resolve path, please check remote path and permissions'
    # %s placeholders: (offending path, web root folder) — see the '_s' name suffixes.
    error_s_not_under_webroot_s = "Error, '%s' is not under the web root folder '%s'"
    # %s placeholder: folder under which a writable location was searched.
    failed_search_writable_starting_s = "Error searching writable folder under '%s'."
@pytest.fixture(name='setup_case')
def fixture_setup_case(tmp_path_factory, source_root, monkeypatch):
    """Pytest fixture yielding a test-case stager.

    The yielded ``copy_case(path, config_file)`` copies
    ``<source_root>/test-data/<path>`` into a fresh temporary directory,
    chdirs into it, and parses ``config_file`` into an ``ErtConfig``.

    Fix: the `@pytest.fixture(name='setup_case')` decorator had been mangled
    into a bare tuple expression, so the fixture was never registered;
    restored it.
    """
    def copy_case(path, config_file):
        tmp_path = tmp_path_factory.mktemp(path.replace('/', '-'))
        shutil.copytree(os.path.join(source_root, 'test-data', path), tmp_path / 'test_data')
        monkeypatch.chdir(tmp_path / 'test_data')
        return ErtConfig.from_file(config_file)
    yield copy_case
class OnAfterRegisterTask(TaskBase):
    """Post-registration task: grant the workspace's default roles to the new
    user and send the rendered welcome e-mail."""

    __name__ = 'on_after_register'

    async def run(self, user_id: str, workspace_id: str):
        """Execute the task for ``user_id`` inside ``workspace_id``.

        :param user_id: UUID string of the freshly registered user.
        :param workspace_id: UUID string of the workspace the user belongs to.
        """
        workspace = (await self._get_workspace(uuid.UUID(workspace_id)))
        user = (await self._get_user(uuid.UUID(user_id), workspace))
        tenant = (await self._get_tenant(user.tenant_id, workspace))
        async with self.get_workspace_session(workspace) as session:
            role_repository = RoleRepository(session)
            user_role_repository = UserRoleRepository(session)
            default_roles = (await role_repository.get_granted_by_default())
            user_roles: list[UserRole] = []
            for role in default_roles:
                # Skip roles the user already holds so the grant is not duplicated.
                existing_user_role = (await user_role_repository.get_by_role_and_user(user.id, role.id))
                if (existing_user_role is None):
                    user_roles.append(UserRole(user_id=user.id, role_id=role.id))
            user_roles = (await user_role_repository.create_many(user_roles))
            for user_role in user_roles:
                # Fan out a follow-up task per newly granted role.
                send_task(on_user_role_created, str(user.id), str(user_role.role_id), str(workspace.id))
        # Render subject and body from the workspace's welcome template.
        context = WelcomeContext(tenant=schemas.tenant.Tenant.model_validate(tenant), user=schemas.user.UserEmailContext.model_validate(user))
        async with self._get_email_subject_renderer(workspace) as email_subject_renderer:
            subject = (await email_subject_renderer.render(EmailTemplateType.WELCOME, context))
        async with self._get_email_template_renderer(workspace) as email_template_renderer:
            html = (await email_template_renderer.render(EmailTemplateType.WELCOME, context))
        self.email_provider.send_email(sender=tenant.get_email_sender(), recipient=(user.email, None), subject=subject, html=html)
def test_deposits(concise_casper, funded_accounts, validation_keys, deposit_amount, new_epoch, induct_validators):
    """Inducted validators' deposits all land in the current dynasty and the
    previous-dynasty total stays zero."""
    amounts = [deposit_amount for _ in funded_accounts]
    induct_validators(funded_accounts, validation_keys, amounts)
    expected_total = deposit_amount * len(funded_accounts)
    assert concise_casper.total_curdyn_deposits_in_wei() == expected_total
    assert concise_casper.total_prevdyn_deposits_in_wei() == 0
def _find_parallel_gemm_ops(cat_inputs: List[Tensor], f_check_src_op: Callable) -> List[Tuple[(List[Operator], int)]]:
    """Collect runs of consecutive cat inputs fed by same-shaped gemm ops.

    Walks ``cat_inputs`` in order, accumulating the source gemm op of each
    valid input into the current run; a run is flushed (and kept only when it
    has at least two ops) whenever an invalid input or a shape change is hit.
    """
    all_groups: List[List[Operator]] = []
    current_run: List[Operator] = []

    def _flush_run() -> None:
        # Only runs of two or more gemms are candidates for parallel fusion.
        if len(current_run) >= 2:
            all_groups.append(current_run.copy())
        current_run.clear()

    for cat_input in cat_inputs:
        if not _is_valid_gemm_op(cat_input, f_check_src_op):
            _flush_run()
            continue
        candidate = list(cat_input.src_ops())[0]
        # A shape change ends the current run before the candidate starts a new one.
        if current_run and not _is_same_shape(current_run[-1], candidate):
            _flush_run()
        current_run.append(candidate)
    _flush_run()
    return all_groups
class SocialAuthMixin():
    """Mixin exercising the socialAuth GraphQL mutation against a mocked
    OAuth backend."""

    # NOTE(review): the next two statements look like stripped decorators for
    # test_social_auth — presumably an auth-mocking decorator and a patch of
    # 'graphql_social_auth.decorators._do_login'. As written they are no-op
    # expression statements; confirm and restore against the original source.
    _auth_mock
    ('graphql_social_auth.decorators._do_login')
    def test_social_auth(self, *args):
        """A google-oauth2 token exchange returns the expected social uid."""
        response = self.execute({'provider': 'google-oauth2', 'accessToken': '-token-'})
        social = response.data['socialAuth']['social']
        self.assertEqual('test', social['uid'])
def get_website_metas(url, html_doc=None):
    """Extract description and OpenGraph meta tags from a web page.

    :param url: page URL, fetched with ``get_website`` when ``html_doc`` is
        not supplied.
    :param html_doc: optional pre-fetched HTML document.
    :return: list of ``(name_or_property, content)`` tuples for the
        ``description`` meta tag and any ``og:*`` property tag whose content
        is non-empty.

    Fix: the original indexed ``meta.attrs['content']`` directly, raising
    KeyError for meta tags that carry no ``content`` attribute (e.g.
    ``<meta charset="utf-8">`` paired with a matching name/property);
    now uses ``.get`` and skips empty content up front.
    """
    from bs4 import BeautifulSoup
    if not html_doc:
        html_doc = get_website(url)
    soup = BeautifulSoup(html_doc, 'html.parser')
    metas = []
    for meta in soup.find_all('meta'):
        content = meta.attrs.get('content', '')
        # Ignore tags with missing or whitespace-only content.
        if not content.strip():
            continue
        if meta.attrs.get('name') == 'description':
            metas.append(('description', content))
        elif ('property' in meta.attrs) and meta.attrs['property'].startswith('og:'):
            metas.append((meta.attrs['property'], content))
    return metas
class TestGetPrivacyNoticeDetail():
    """Tests for the GET privacy-notice detail endpoint."""

    @pytest.fixture(scope='function')
    def url(self, privacy_notice) -> str:
        """Detail URL for the fixture privacy notice.

        Fix: restored the `@pytest.fixture(scope='function')` decorator that
        had been mangled into a bare tuple expression.
        """
        return V1_URL_PREFIX + PRIVACY_NOTICE_DETAIL.format(privacy_notice_id=privacy_notice.id)

    def test_get_privacy_notice_unauthenticated(self, url, api_client):
        """No auth header -> 401."""
        resp = api_client.get(url)
        assert resp.status_code == 401

    def test_get_privacy_notice_wrong_scope(self, url, api_client: TestClient, generate_auth_header):
        """Wrong scope -> 403."""
        auth_header = generate_auth_header(scopes=[scopes.STORAGE_READ])
        resp = api_client.get(url, headers=auth_header)
        assert resp.status_code == 403

    def test_get_invalid_privacy_notice(self, api_client: TestClient, generate_auth_header):
        """Unknown notice id -> 404."""
        auth_header = generate_auth_header(scopes=[scopes.PRIVACY_NOTICE_READ])
        url = V1_URL_PREFIX + PRIVACY_NOTICE_DETAIL.format(privacy_notice_id='bad')
        resp = api_client.get(url, headers=auth_header)
        assert resp.status_code == 404

    def test_get_privacy_notice(self, api_client: TestClient, generate_auth_header, privacy_notice: PrivacyNotice, url):
        """The detail payload mirrors every field of the stored notice."""
        auth_header = generate_auth_header(scopes=[scopes.PRIVACY_NOTICE_READ])
        resp = api_client.get(url, headers=auth_header)
        assert resp.status_code == 200
        data = resp.json()
        assert data['id'] == privacy_notice.id
        assert data['name'] == privacy_notice.name
        assert data['description'] == privacy_notice.description
        assert data['origin'] is None
        assert data['created_at'] == privacy_notice.created_at.isoformat()
        assert data['updated_at'] == privacy_notice.updated_at.isoformat()
        for region in data['regions']:
            assert PrivacyNoticeRegion(region) in privacy_notice.regions
        for data_use in data['data_uses']:
            assert data_use in privacy_notice.data_uses
        assert ConsentMechanism(data['consent_mechanism']) == privacy_notice.consent_mechanism
        assert EnforcementLevel(data['enforcement_level']) == privacy_notice.enforcement_level
        assert data['version'] == privacy_notice.version
        assert data['disabled'] == privacy_notice.disabled
        assert data['displayed_in_overlay'] == privacy_notice.displayed_in_overlay

    def test_get_privacy_notice_unescaped(self, api_client: TestClient, generate_auth_header):
        """The description is HTML-escaped by default and returned verbatim
        only when the Unescape-Safestr header is sent."""
        auth_header = generate_auth_header(scopes=[scopes.PRIVACY_NOTICE_READ, scopes.PRIVACY_NOTICE_CREATE])
        maybe_dangerous_description = "user's description <script />"
        resp = api_client.post(V1_URL_PREFIX + PRIVACY_NOTICE, headers=auth_header, json=[{'name': 'test privacy notice 1', 'notice_key': 'test_privacy_notice_1', 'description': maybe_dangerous_description, 'regions': [PrivacyNoticeRegion.be.value, PrivacyNoticeRegion.us_ca.value], 'consent_mechanism': ConsentMechanism.opt_in.value, 'data_uses': ['marketing.advertising'], 'enforcement_level': EnforcementLevel.system_wide.value, 'displayed_in_overlay': True}])
        print(f'Created Notice: {resp.text}')
        assert resp.status_code == 200
        created_notice = resp.json()[0]
        url = V1_URL_PREFIX + PRIVACY_NOTICE_DETAIL.format(privacy_notice_id=created_notice['id'])
        resp = api_client.get(url, headers=auth_header)
        assert resp.status_code == 200
        data = resp.json()
        # Fix: the original line was a syntax error (unescaped quote inside a
        # single-quoted literal). Reconstructed as the html.escape()d form of
        # maybe_dangerous_description — NOTE(review): confirm the exact
        # escaping the API applies.
        assert data['description'] == 'user&#x27;s description &lt;script /&gt;'
        unescape_header = {'Unescape-Safestr': 'yes'}
        auth_and_unescape_header = {**auth_header, **unescape_header}
        print(f'Auth & Unescape Headers: {auth_and_unescape_header}')
        resp = api_client.get(url, headers=auth_and_unescape_header)
        data = resp.json()
        print(f'Escaped Data: {data}')
        assert data['description'] == maybe_dangerous_description
def bank_deposits():
    """Aggregate total deposits per bank from the module-level ``reader``
    rows and pretty-print them sorted by total (ascending).

    Improvement: replaces the manual key-presence check with
    ``collections.defaultdict`` and builds the output list with a
    comprehension; behavior (grouping, ascending sort, pprint output) is
    unchanged.
    """
    from collections import defaultdict
    deposits = defaultdict(int)
    for row in reader:
        # DEPSUM carries thousands separators, e.g. "1,234,567".
        deposits[row['NAMEFULL']] += int(row['DEPSUM'].replace(',', ''))
    deplist = [{'name': name, 'deposits': total} for name, total in deposits.items()]
    deplist.sort(key=(lambda entry: entry['deposits']))
    pprint(deplist)
def forward(apps, schema_editor):
    """Data migration: wrap each reservation's legacy bill in a new
    ReservationBill and attach it to the reservation."""
    Reservation = apps.get_model('core', 'Reservation')
    Bill = apps.get_model('core', 'Bill')
    ReservationBill = apps.get_model('core', 'ReservationBill')
    for reservation in Reservation.objects.all():
        # Each reservation gets a fresh ReservationBill pointing at its old bill.
        reservation.bill = ReservationBill.objects.create(bill_ptr=reservation.old_bill)
        reservation.save()
@pytest.mark.external
@pytest.mark.skipif(has_openai_key is False, reason='OpenAI API key not available')
def test_ner_config(config: Config):
    """An NER pipeline assembled from config exposes the expected LLM pipe,
    task type, and label set.

    Fix: the `@pytest.mark.external` / `@pytest.mark.skipif` decorators had
    been mangled into bare attribute/call expressions, so the test was
    neither marked external nor skipped without an API key; restored them.
    """
    nlp = assemble_from_config(config)
    assert nlp.pipe_names == ['llm']
    # The serialized component config (minus 'factory') must be reusable to
    # rebuild the same pipe on a fresh pipeline.
    component_cfg = dict(config['components']['llm'])
    component_cfg.pop('factory')
    nlp2 = spacy.blank('en')
    nlp2.add_pipe('llm', config=component_cfg)
    assert nlp2.pipe_names == ['llm']
    pipe = nlp.get_pipe('llm')
    assert isinstance(pipe, LLMWrapper)
    assert isinstance(pipe.task, LLMTask)
    labels = config['components']['llm']['task']['labels']
    labels = split_labels(labels)
    task = pipe.task
    assert isinstance(task, LabeledTask)
    assert sorted(task.labels) == sorted(tuple(labels))
    assert pipe.labels == task.labels
    assert nlp.pipe_labels['llm'] == list(task.labels)
class Pre(MixHtmlState.HtmlStates, Html.Html):
    """HTML ``<pre>`` component: left-aligned pre-formatted text with optional
    markdown rendering driven by the component's showdown option."""

    name = 'Pre formatted text'
    tag = 'pre'
    _option_cls = OptText.OptionsText

    def __init__(self, page: primitives.PageModel, vals, color, width, height, html_code, options, helper, profile):
        """Build the component and attach its CSS sizing/colour attributes."""
        super(Pre, self).__init__(page, vals, html_code=html_code, profile=profile, options=options, css_attrs={'width': width, 'height': height, 'color': color})
        self.css({'text-align': 'left'})
        self.add_helper(helper)

    def dom(self) -> JsHtml.JsHtmlRich:
        """Lazily create and cache the rich DOM wrapper for this component.

        NOTE(review): usage below (``self.dom.setAttribute``) treats this as
        an attribute, suggesting a stripped ``@property`` decorator — confirm
        against the original source.
        """
        if (self._dom is None):
            self._dom = JsHtml.JsHtmlRich(self, page=self.page)
        return self._dom

    def selectable(self, flag: bool = False):
        """Disable text selection unless ``flag`` is True; returns self."""
        if (not flag):
            self.style.add_classes.text.no_selection()
        return self

    def options(self) -> OptText.OptionsText:
        """Expose the component's text options.

        NOTE(review): usage below (``self.options.showdown``) treats this as
        an attribute — a stripped ``@property`` is likely; confirm.
        """
        return super().options

    def __str__(self):
        """Render the component to its final HTML string."""
        # Keep the raw content available client-side for later updates.
        self.onReady([self.dom.setAttribute('data-content', self.dom.content)])
        val = (self.page.py.markdown.all(self.val) if (self.options.showdown is not False) else self.val)
        return ('<%s %s>%s</%s>%s' % (self.tag, self.get_attrs(css_class_names=self.style.get_classes()), val, self.tag, self.helper))
class EnumTopCalc(Enums):
    """Column calculation options (Tabulator ``topCalc`` / ``bottomCalc``).

    Improvement: the identical precision-handling block that was duplicated
    across ``avg``/``max``/``min``/``sum`` is factored into ``_set_precision``.
    The trailing ``self._set_value()`` calls deliberately stay inside each
    public method so their calling frame is unchanged.
    """

    def _set_precision(self, precision: Union[(int, bool)]) -> None:
        """Store ``precision`` on the calc params matching ``self.key``.

        ``precision`` is the number of decimals to display; ``False`` shows
        the full float value. ``None`` means leave the params untouched.
        """
        if (precision is not None):
            if (self.key == 'bottomCalc'):
                self._set_value('bottomCalcParams', {'precision': precision})
            else:
                self._set_value('topCalcParams', {'precision': precision})

    def concat(self):
        """Join the column values together."""
        return self._set_value()

    def count(self):
        """Count the rows in the column."""
        return self._set_value()

    def avg(self, precision: Union[(int, bool)] = None):
        """Average of the column, optionally rounded to ``precision`` decimals."""
        self._set_precision(precision)
        return self._set_value()

    def max(self, precision: Union[(int, bool)] = None):
        """Maximum of the column, optionally rounded to ``precision`` decimals."""
        self._set_precision(precision)
        return self._set_value()

    def min(self, precision: Union[(int, bool)] = None):
        """Minimum of the column, optionally rounded to ``precision`` decimals."""
        self._set_precision(precision)
        return self._set_value()

    def sum(self, precision: Union[(int, bool)] = None):
        """Sum of the column, optionally rounded to ``precision`` decimals."""
        self._set_precision(precision)
        return self._set_value()

    def bespoke(self, js_funcs: types.JS_FUNCS_TYPES, profile: types.PROFILE_TYPE = None):
        """Use custom JavaScript functions as the calculation."""
        return self._set_value(value=JsUtils.jsConvertFncs(js_funcs, toStr=True, profile=profile), js_type=True)
class TestSwitchedAction(unittest.TestCase):
    """Switch-routing behavior of SwitchedActionOne/Two: the active switches
    decide which concrete action handles the request."""

    def _assert_success(self, action_cls, settings, body, switches, expected_body):
        """Run ``action_cls`` against one request and check a clean response."""
        action = action_cls(cast(ServerSettings, settings))
        request = EnrichedActionRequest(action='one', body=body, switches=switches)
        response = action(request)
        self.assertEqual([], response.errors)
        self.assertEqual(expected_body, response.body)

    def test_action_one_switch_twelve(self):
        settings = {'foo': 'bar'}
        self._assert_success(SwitchedActionOne, settings, {'planet': 'Mars'}, [12],
                             {'planet_response': 'Mars', 'settings': settings})

    def test_action_one_switches_twelve_and_five(self):
        settings = {'baz': 'qux'}
        self._assert_success(SwitchedActionOne, settings, {'planet': 'Jupiter'}, [12, 5],
                             {'planet_response': 'Jupiter', 'settings': settings})

    def test_action_one_switch_five(self):
        settings = {'foo': 'bar'}
        self._assert_success(SwitchedActionOne, settings, {'animal': 'cat'}, [5],
                             {'animal_response': 'cat', 'settings': settings})

    def test_action_no_switches(self):
        settings = {'foo': 'bar'}
        self._assert_success(SwitchedActionOne, settings, {'animal': 'cat'}, [],
                             {'animal_response': 'cat', 'settings': settings})

    def test_action_one_switch_twelve_with_errors(self):
        # Switch 12 routes to the planet handler, so an animal body must fail
        # validation with both a missing-key and an extra-key error.
        settings = {'foo': 'bar'}
        action = SwitchedActionOne(cast(ServerSettings, settings))
        with self.assertRaises(ActionError) as error_context:
            action(EnrichedActionRequest(action='one', body={'animal': 'cat'}, switches=[12]))
        errors = error_context.exception.errors
        self.assertEqual(2, len(errors))
        self.assertIn(Error('MISSING', 'Missing key: planet', field='planet', is_caller_error=True), errors)
        self.assertIn(Error('UNKNOWN', 'Extra keys present: animal', is_caller_error=True), errors)

    def test_action_two_switch_seven(self):
        settings = {'baz': 'qux'}
        self._assert_success(SwitchedActionTwo, settings, {'animal': 'dog'}, [7],
                             {'animal_response': 'dog', 'settings': settings})

    def test_action_two_switch_twelve(self):
        settings = {'foo': 'bar'}
        self._assert_success(SwitchedActionTwo, settings, {'planet': 'Pluto'}, [12],
                             {'planet_response': 'Pluto', 'settings': settings})

    def test_action_two_no_switches(self):
        settings = {'foo': 'bar'}
        self._assert_success(SwitchedActionTwo, settings, {'building': 'Empire State Building'}, [],
                             {'building_response': 'Empire State Building'})
def draw(G: nx.Graph, pos: Dict[(Hashable, np.ndarray)], lines_func: Callable, color_by: Hashable=None, node_color_by: Hashable=None, lw_by: Hashable=None, alpha_by: Hashable=None, ax=None, encodings_kwargs: Dict={}, **linefunc_kwargs):
    """Draw the edges of ``G`` as matplotlib patches on an axes.

    :param G: graph whose edges are drawn.
    :param pos: mapping of node -> coordinate array.
    :param lines_func: callable producing the edge patches.
    :param color_by: edge-table column driving edge colour.
    :param node_color_by: node-table column driving edge colour instead.
    :param lw_by: edge-table column driving line width.
    :param alpha_by: edge-table column driving transparency.
    :param ax: target axes; defaults to ``plt.gca()``.
    :param encodings_kwargs: extra aesthetics; ``lw_scale``, ``alpha_scale``
        and ``alpha_bounds`` are consumed here, the remainder is forwarded
        to the patch constructor via ``aes_kw``.
    :param linefunc_kwargs: forwarded verbatim to ``lines_func``.

    NOTE(review): the mutable default ``encodings_kwargs={}`` is shared
    across calls; it is safe only because of the deepcopy below — consider
    ``None`` + in-body default.
    """
    nt = node_table(G)
    et = edge_table(G)
    if (ax is None):
        ax = plt.gca()
    # color_by and node_color_by are mutually constrained; fail early.
    validate_color_by(G, color_by, node_color_by)
    edge_color = edge_colors(et, nt, color_by, node_color_by)
    # Copy before pop() so the caller's dict is never mutated.
    encodings_kwargs = deepcopy(encodings_kwargs)
    lw = (line_width(et, lw_by) * encodings_kwargs.pop('lw_scale', 1.0))
    alpha_bounds = encodings_kwargs.pop('alpha_bounds', None)
    alpha = (transparency(et, alpha_by, alpha_bounds) * encodings_kwargs.pop('alpha_scale', 1.0))
    aes_kw = {'facecolor': 'none'}
    aes_kw.update(encodings_kwargs)
    patches = lines_func(et, pos, edge_color=edge_color, alpha=alpha, lw=lw, aes_kw=aes_kw, **linefunc_kwargs)
    for patch in patches:
        ax.add_patch(patch)
class OptionPlotoptionsVectorSonificationDefaultinstrumentoptionsMappingTremolo(Options):
    """Generated options wrapper for
    ``plotOptions.vector.sonification.defaultInstrumentOptions.mapping.tremolo``.

    NOTE(review): generated option accessors like these are commonly
    ``@property``-decorated; the decorators may have been stripped — confirm
    against the sibling generated classes.
    """

    def depth(self) -> 'OptionPlotoptionsVectorSonificationDefaultinstrumentoptionsMappingTremoloDepth':
        """Sub-options controlling the tremolo depth mapping."""
        return self._config_sub_data('depth', OptionPlotoptionsVectorSonificationDefaultinstrumentoptionsMappingTremoloDepth)

    def speed(self) -> 'OptionPlotoptionsVectorSonificationDefaultinstrumentoptionsMappingTremoloSpeed':
        """Sub-options controlling the tremolo speed mapping."""
        return self._config_sub_data('speed', OptionPlotoptionsVectorSonificationDefaultinstrumentoptionsMappingTremoloSpeed)
class RateLimitVerTest(AmbassadorTest):
    """RateLimitService ``protocol_version`` handling: only 'v3' is valid;
    'v2' and 'default' must invalidate the service so requests pass through
    without rate limiting or injected headers.

    Fix: restored the ``@classmethod`` decorator on ``variants`` (the ``cls``
    parameter shows it was stripped during extraction).
    """

    target: ServiceType
    specified_protocol_version: Literal[('v2', 'v3', 'default')]
    expected_protocol_version: Literal[('v3', 'invalid')]
    rls: ServiceType

    @classmethod
    def variants(cls) -> Generator[(Node, None, None)]:
        # One test instance per candidate protocol_version value.
        for protocol_version in ['v2', 'v3', 'default']:
            (yield cls(protocol_version, name='{self.specified_protocol_version}'))

    def init(self, protocol_version: Literal[('v2', 'v3', 'default')]):
        self.target = HTTP()
        self.specified_protocol_version = protocol_version
        self.expected_protocol_version = cast(Literal[('v3', 'invalid')], (protocol_version if (protocol_version in ['v3']) else 'invalid'))
        # The backing RLS itself always runs a valid protocol; only
        # Ambassador's configured protocol_version is under test.
        self.rls = RLSGRPC(protocol_version=(self.expected_protocol_version if (self.expected_protocol_version != 'invalid') else 'v3'))

    def config(self) -> Generator[(Union[(str, Tuple[(Node, str)])], None, None)]:
        (yield (self.target, self.format('\n---\napiVersion: getambassador.io/v3alpha1\nkind: Mapping\nname: ratelimit_target_mapping\nhostname: "*"\nprefix: /target/\nservice: {self.target.path.fqdn}\nlabels:\n ambassador:\n - request_label_group:\n - request_headers:\n key: kat-req-rls-allow\n header_name: "kat-req-rls-allow"\n omit_if_not_present: true\n - request_headers:\n key: kat-req-rls-headers-append\n header_name: "kat-req-rls-headers-append"\n omit_if_not_present: true\n')))
        # 'default' omits protocol_version entirely; other cases set it explicitly.
        (yield (self, (self.format('\n---\napiVersion: getambassador.io/v3alpha1\nkind: RateLimitService\nname: {self.rls.path.k8s}\nservice: "{self.rls.path.fqdn}"\ntimeout_ms: 500\n') + ('' if (self.specified_protocol_version == 'default') else f"protocol_version: '{self.specified_protocol_version}'"))))

    def queries(self):
        (yield Query(self.url('target/')))
        (yield Query(self.url('target/'), expected=200, headers={'kat-req-rls-allow': 'true', 'kat-req-rls-headers-append': 'no header'}))
        # Only a valid protocol_version can produce a 429 rejection.
        (yield Query(self.url('target/'), expected=(429 if (self.expected_protocol_version != 'invalid') else 200), headers={'kat-req-rls-allow': 'over my dead body', 'kat-req-rls-headers-append': 'Hello=Foo; Hi=Baz'}))

    def check(self):
        if (self.expected_protocol_version == 'invalid'):
            # Invalid RLS config: no headers may be injected into the response.
            assert ('Hello' not in self.results[2].headers)
            assert ('Hi' not in self.results[2].headers)
            assert ('Kat-Resp-Rls-Protocol-Version' not in self.results[2].headers)
            return
        assert (self.results[2].headers['Hello'] == ['Foo'])
        assert (self.results[2].headers['Hi'] == ['Baz'])
        assert (self.results[2].headers['Content-Type'] == ['application/json'])
        assert (self.results[2].headers['Kat-Resp-Rls-Protocol-Version'] == [self.expected_protocol_version])
def create_base_model(inherit_from=models.Model):
class Base(inherit_from, ExtensionsMixin):
class Meta():
abstract = True
_cached_django_content_type = None
def register_regions(cls, *regions):
if hasattr(cls, 'template'):
warnings.warn('Ignoring second call to register_regions.', RuntimeWarning)
return
cls.template = Template('', '', regions)
cls._feincms_all_regions = cls.template.regions
def register_templates(cls, *templates):
if (not hasattr(cls, '_feincms_templates')):
cls._feincms_templates = OrderedDict()
cls.TEMPLATES_CHOICES = []
instances = cls._feincms_templates
for template in templates:
if (not isinstance(template, Template)):
template = Template(**template)
instances[template.key] = template
try:
field = next(iter((field for field in cls._meta.local_fields if (field.name == 'template_key'))))
except (StopIteration,):
cls.add_to_class('template_key', ChoicesCharField(_('template'), max_length=255))
field = next(iter((field for field in cls._meta.local_fields if (field.name == 'template_key'))))
def _template(self):
try:
return self._feincms_templates[self.template_key]
except KeyError:
return self._feincms_templates[list(self._feincms_templates.keys())[0]]
cls.template = property(_template)
cls.TEMPLATE_CHOICES = [(template_.key, template_.title) for template_ in cls._feincms_templates.values()]
field.choices = cls.TEMPLATE_CHOICES
field.default = cls.TEMPLATE_CHOICES[0][0]
cls._feincms_all_regions = set()
for template in cls._feincms_templates.values():
cls._feincms_all_regions.update(template.regions)
content_proxy_class = ContentProxy
def content(self):
if (not hasattr(self, '_content_proxy')):
self._content_proxy = self.content_proxy_class(self)
return self._content_proxy
def _create_content_base(cls):
    """Create and attach the abstract base model for cls' content types.

    Dynamically builds ``_Internal<cls>ContentTypeBase`` — an abstract
    Django model with parent FK / region / ordering fields — and sets up
    the bookkeeping lists later used by ``create_content_type``.
    """
    cls._needs_templates()
    class Meta():
        abstract = True
        app_label = cls._meta.app_label
        ordering = ['ordering']
    def __str__(self):
        return ('%s<pk=%s, parent=%s<pk=%s, %s>, region=%s, ordering=%d>' % (self.__class__.__name__, self.pk, self.parent.__class__.__name__, self.parent.pk, self.parent, self.region, self.ordering))
    def render(self, **kwargs):
        # Delegate to a region-specific render_<region> method when defined.
        render_fn = getattr(self, ('render_%s' % self.region), None)
        if render_fn:
            return render_fn(**kwargs)
        raise NotImplementedError
    def get_queryset(cls, filter_args=None):
        qs = cls.objects.select_related()
        if (filter_args is not None):
            return qs.filter(filter_args)
        return qs
    attrs = {'__module__': cls.__module__, '__str__': __str__, 'render': render, 'get_queryset': classmethod(get_queryset), 'Meta': Meta, 'parent': models.ForeignKey(cls, related_name='%(class)s_set', on_delete=models.CASCADE), 'region': models.CharField(max_length=255), 'ordering': models.IntegerField(_('ordering'), default=0)}
    name = ('_Internal%sContentTypeBase' % cls.__name__)
    # Warn when the auto-generated name collides with a user class.
    if hasattr(sys.modules[cls.__module__], name):
        warnings.warn(('The class %s.%s has the same name as the class that FeinCMS auto-generates based on %s.%s. To avoid databaseerrors and import clashes, rename one of these classes.' % (cls.__module__, name, cls.__module__, cls.__name__)), RuntimeWarning)
    cls._feincms_content_model = type(str(name), (models.Model,), attrs)
    cls._feincms_content_types = []
    cls._feincms_content_types_with_process = []
    cls._feincms_content_types_with_finalize = []
    # Copy inherited editor hooks so subclasses don't mutate the parent's.
    if hasattr(cls, 'feincms_item_editor_context_processors'):
        cls.feincms_item_editor_context_processors = list(cls.feincms_item_editor_context_processors)
    else:
        cls.feincms_item_editor_context_processors = []
    if hasattr(cls, 'feincms_item_editor_includes'):
        cls.feincms_item_editor_includes = dict(cls.feincms_item_editor_includes)
    else:
        cls.feincms_item_editor_includes = {}
def create_content_type(cls, model, regions=None, class_name=None, **kwargs):
    """Create a concrete content type for *cls* from abstract *model*.

    The new type inherits from ``(model, <auto content base>)``, is
    registered on the requested regions (all regions by default) and is
    returned.  Extra kwargs are passed to ``model.initialize_type`` when
    defined, otherwise set as class attributes.
    """
    if (not class_name):
        class_name = model.__name__
    # Bail out early when the reverse accessor name is already taken.
    try:
        getattr(cls, ('%s_set' % class_name.lower()))
        warnings.warn(('Cannot create content type using %s.%s for %s.%s, because %s_set is already taken.' % (model.__module__, class_name, cls.__module__, cls.__name__, class_name.lower())), RuntimeWarning)
        return
    except AttributeError:
        pass
    if (not model._meta.abstract):
        raise ImproperlyConfigured('Cannot create content type from non-abstract model (yet).')
    if (not hasattr(cls, '_feincms_content_model')):
        cls._create_content_base()
    feincms_content_base = cls._feincms_content_model
    class Meta(feincms_content_base.Meta):
        db_table = f'{cls._meta.db_table}_{class_name.lower()}'
        verbose_name = model._meta.verbose_name
        verbose_name_plural = model._meta.verbose_name_plural
        permissions = model._meta.permissions
    attrs = {'__module__': cls.__module__, 'Meta': Meta}
    new_type = type(str(class_name), (model, feincms_content_base), attrs)
    cls._feincms_content_types.append(new_type)
    # Track content types that need process/finalize request hooks.
    if hasattr(getattr(new_type, 'process', None), '__call__'):
        cls._feincms_content_types_with_process.append(new_type)
    if hasattr(getattr(new_type, 'finalize', None), '__call__'):
        cls._feincms_content_types_with_finalize.append(new_type)
    if (not regions):
        regions = {region.key for region in cls._feincms_all_regions}
    for region in cls._feincms_all_regions:
        if (region.key in regions):
            region._content_types.append(new_type)
    # Remember which content types were derived from this abstract model.
    if (not hasattr(model, '_feincms_content_models')):
        model._feincms_content_models = []
    model._feincms_content_models.append(new_type)
    new_type._feincms_content_class = cls
    optgroup = kwargs.pop('optgroup', None)
    if optgroup:
        new_type.optgroup = optgroup
    # Remaining kwargs: delegate to the type's own initializer when present.
    if hasattr(new_type, 'initialize_type'):
        new_type.initialize_type(**kwargs)
    else:
        for (k, v) in kwargs.items():
            setattr(new_type, k, v)
    if hasattr(model, 'feincms_item_editor_context_processors'):
        cls.feincms_item_editor_context_processors.extend(model.feincms_item_editor_context_processors)
    if hasattr(model, 'feincms_item_editor_includes'):
        for (key, incls) in model.feincms_item_editor_includes.items():
            cls.feincms_item_editor_includes.setdefault(key, set()).update(incls)
    return new_type
def _django_content_type(self):
    """Return the Django ContentType for this model, caching it on the class."""
    cached = getattr(self, '_cached_django_content_type', None)
    if not cached:
        # First lookup: resolve and memoize on the class for all instances.
        self.__class__._cached_django_content_type = ContentType.objects.get_for_model(self)
    return self.__class__._cached_django_content_type
def content_type_for(cls, model):
    """Return the content type directly derived from *model*, or None.

    Only an exact direct subclass (``__base__ is model``) qualifies; a
    missing or empty registry also yields None.
    """
    registered = getattr(cls, '_feincms_content_types', None)
    if not registered:
        return None
    for content_type in registered:
        if issubclass(content_type, model) and content_type.__base__ is model:
            return content_type
    return None
def _needs_templates(cls):
if (not hasattr(cls, 'template')):
raise ImproperlyConfigured(('You need to register at least one template or one region on %s.' % cls.__name__))
def _needs_content_types(cls):
if (not getattr(cls, '_feincms_content_types', None)):
raise ImproperlyConfigured(('You need to create at least one content type for the %s model.' % cls.__name__))
def copy_content_from(self, obj):
    """Duplicate every content item attached to *obj* onto *self*."""
    for content_cls in self._feincms_content_types:
        for source in content_cls.objects.filter(parent=obj):
            # Clone without pk/parent, then re-home the copy on self.
            clone = copy_model_instance(source, exclude=('id', 'parent'))
            clone.parent = self
            clone.save()
def replace_content_with(self, obj):
    """Delete all of self's content items, then copy obj's content over."""
    for content_cls in self._feincms_content_types:
        content_cls.objects.filter(parent=self).delete()
    self.copy_content_from(obj)
def register_with_reversion(cls, **kwargs):
    """Register *cls* and all its content types with django-reversion."""
    try:
        from reversion.revisions import register
    except ImportError:
        # Older reversion releases export register at the package root.
        try:
            from reversion import register
        except ImportError:
            # NOTE: raises OSError (not ImportError), preserved for
            # backward compatibility with existing callers.
            raise OSError('django-reversion is not installed')
    # Follow every content type's reverse accessor from the parent model.
    follow = [('%s_set' % content_type.__name__.lower()) for content_type in cls._feincms_content_types]
    for content_type in cls._feincms_content_types:
        register(content_type, **kwargs)
    register(cls, follow=follow, **kwargs)
return Base |
class Connections:
    """Registry of connectors, keyed by ``ConnectionConfig.key``.

    Connectors are created lazily on first request and reused afterwards;
    :meth:`close` shuts down every database-style connector that was opened.
    """

    def __init__(self) -> None:
        # key -> connector instance cache
        self.connections: Dict[str, Union[BaseConnector, BaseEmailConnector]] = {}

    def get_connector(self, connection_config: ConnectionConfig) -> Union[BaseConnector, BaseEmailConnector]:
        """Return the cached connector for *connection_config*, building it on first use."""
        key = connection_config.key
        if key not in self.connections:
            self.connections[key] = Connections.build_connector(connection_config)
        return self.connections[key]

    @staticmethod
    def build_connector(connection_config: ConnectionConfig) -> Union[BaseConnector, BaseEmailConnector]:
        """Instantiate the connector class matching the config's connection type.

        Raises:
            NotImplementedError: for unsupported connection types.
        """
        # Dispatch table instead of a long if-chain.  Built inside the
        # function so connector class names are only evaluated on demand,
        # matching the original lazy branch evaluation as closely as possible.
        connector_classes = {
            ConnectionType.postgres: PostgreSQLConnector,
            ConnectionType.mongodb: MongoDBConnector,
            ConnectionType.mysql: MySQLConnector,
            ConnectionType.snowflake: SnowflakeConnector,
            ConnectionType.redshift: RedshiftConnector,
            ConnectionType.mssql: MicrosoftSQLServerConnector,
            ConnectionType.mariadb: MariaDBConnector,
            ConnectionType.bigquery: BigQueryConnector,
            ConnectionType.saas: SaaSConnector,
            ConnectionType.manual: ManualConnector,
            ConnectionType.timescale: TimescaleConnector,
            ConnectionType.dynamodb: DynamoDBConnector,
            ConnectionType.fides: FidesConnector,
        }
        try:
            connector_class = connector_classes[connection_config.connection_type]
        except KeyError:
            raise NotImplementedError(f'No connector available for {connection_config.connection_type}') from None
        return connector_class(connection_config)

    def close(self) -> None:
        """Close every opened connector that supports closing."""
        for connector in self.connections.values():
            # Email connectors are stateless and have nothing to close.
            if isinstance(connector, BaseConnector):
                connector.close()
class TestRemoveSkillFailsWhenExceptionOccurs():
    """CLI test: `aea remove skill` exits with code 1 when an unexpected
    exception (here: a patched ``shutil.rmtree``) occurs mid-removal.

    ``setup_class`` builds a throwaway agent in a temp dir, adds the gym
    skill, then patches ``shutil.rmtree`` to raise before invoking the
    remove command.
    """
    def setup_class(cls):
        cls.runner = CliRunner()
        cls.agent_name = 'myagent'
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        # Copy the packages registry into the temp working dir.
        dir_path = Path('packages')
        tmp_dir = (cls.t / dir_path)
        src_dir = (cls.cwd / Path(ROOT_DIR, dir_path))
        shutil.copytree(str(src_dir), str(tmp_dir))
        cls.skill_id = str(GYM_SKILL_PUBLIC_ID)
        cls.skill_name = 'gym'
        os.chdir(cls.t)
        result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'init', '--author', AUTHOR])
        assert (result.exit_code == 0)
        result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'create', '--local', cls.agent_name], standalone_mode=False)
        assert (result.exit_code == 0)
        os.chdir(cls.agent_name)
        # Point the agent's registry at the repo's packages directory.
        config = AgentConfig.from_json(yaml.safe_load(open(DEFAULT_AEA_CONFIG_FILE)))
        config.registry_path = os.path.join(ROOT_DIR, 'packages')
        yaml.safe_dump(dict(config.json), open(DEFAULT_AEA_CONFIG_FILE, 'w'))
        result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', '--local', 'skill', cls.skill_id], standalone_mode=False)
        assert (result.exit_code == 0)
        # Force removal to fail: any rmtree call raises.
        cls.patch = unittest.mock.patch('shutil.rmtree', side_effect=BaseException('an exception'))
        cls.patch.start()
        cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'remove', 'skill', cls.skill_name], standalone_mode=False)
    def test_exit_code_equal_to_1(self):
        # The CLI should surface the failure as a non-zero exit code.
        assert (self.result.exit_code == 1)
    def teardown_class(cls):
        cls.patch.stop()
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
class TransactionDecorator(object):
    """Method decorator running the wrapped method inside a user session.

    *user* names an attribute on the decorated method's instance that holds
    the user object to open the session for.
    """

    def __init__(self, user):
        # Attribute name looked up on the instance at call time.
        self.user = user

    def __call__(self, fn):
        def wrapper(fn, fn_self, *args):
            # Resolve the user from the instance and open a session for it.
            session_user = getattr(fn_self, self.user)
            with fn_self.setup_user_session(session_user):
                return fn(fn_self, *args)
        # decorator.decorator preserves fn's original signature.
        return decorator.decorator(wrapper, fn)
('duplicate', dataset=('The dataset to save to', 'positional', None, str), file_path=('The jsonl file with matched items', 'positional', None, str))
def check_duplicate(dataset, file_path):
    """Recipe body: stream matched items from *file_path* for choice review.

    Returns the recipe components dict (dataset name, view id, stream).
    """
    stream = add_options(JSONL(file_path))
    return {'dataset': dataset, 'view_id': 'choice', 'stream': stream}
class TicketFactoryBase(BaseFactory):
    """Factory producing Ticket model instances with common test defaults."""
    class Meta():
        model = Ticket
    # Descriptive fields drawn from shared fake-data helpers.
    name = common.string_
    description = common.string_
    type = common.string_
    price = common.float_
    quantity = 10
    is_description_visible = True
    position = 10
    is_fee_absorbed = True
    # Sales window uses the shared default start/end dates.
    sales_starts_at = common.date_
    sales_ends_at = common.dateEnd_
    is_hidden = True
    # Per-order quantity bounds.
    min_order = 0
    max_order = 10
def test_sync_filter_against_log_events(w3, emitter, wait_for_transaction, emitter_contract_event_ids):
    """A catch-all eth log filter should pick up the emitted event's log."""
    # Install the filter before emitting so the event is captured.
    txn_filter = w3.eth.filter({})
    txn_hashes = {emitter.functions.logNoArgs(emitter_contract_event_ids.LogNoArguments).transact()}
    for pending_hash in txn_hashes:
        wait_for_transaction(w3, pending_hash)
    observed = {log['transactionHash'] for log in txn_filter.get_new_entries()}
    assert (txn_hashes == observed)
.feature('unit')
.story('core', 'api', 'schedule')
class TestScheduledProcesses():
    """Unit tests for the /fledge/schedule/process REST endpoints.

    Uses an aiohttp test client with ``Server.scheduler`` and the storage
    layer mocked out.  The repeated interpreter-version checks exist
    because patched coroutine results must be pre-awaited values on
    Python 3.8+ but scheduled futures on older interpreters.
    """
    def client(self, loop, test_client):
        # aiohttp test client with the core routes installed.
        app = web.Application(loop=loop)
        routes.setup(app)
        return loop.run_until_complete(test_client(app))
    def setup_method(self):
        server.Server.scheduler = Scheduler(None, None)
    def teardown_method(self):
        server.Server.scheduler = None
    async def test_get_scheduled_processes(self, client):
        async def mock_coro():
            processes = []
            process = ScheduledProcess()
            process.name = 'foo'
            process.script = 'bar'
            processes.append(process)
            return processes
        if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
            _rv = (await mock_coro())
        else:
            _rv = asyncio.ensure_future(mock_coro())
        with patch.object(server.Server.scheduler, 'get_scheduled_processes', return_value=_rv):
            resp = (await client.get('/fledge/schedule/process'))
            assert (200 == resp.status)
            result = (await resp.text())
            json_response = json.loads(result)
            # Only process names are exposed by the endpoint.
            assert ({'processes': ['foo']} == json_response)
    async def test_get_scheduled_process(self, client):
        storage_client_mock = MagicMock(StorageClientAsync)
        payload = '{"return": ["name"], "where": {"column": "name", "condition": "in", "value": ["purge"]}}'
        response = {'rows': [{'name': 'purge'}], 'count': 1}
        if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
            _rv = (await mock_coro_response(response))
        else:
            _rv = asyncio.ensure_future(mock_coro_response(response))
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv) as mock_storage_call:
                resp = (await client.get('/fledge/schedule/process/purge'))
                assert (200 == resp.status)
                result = (await resp.text())
                json_response = json.loads(result)
                assert ('purge' == json_response)
                mock_storage_call.assert_called_with('scheduled_processes', payload)
    async def test_get_scheduled_process_bad_data(self, client):
        # Unknown process name must yield a 404 with a descriptive reason.
        storage_client_mock = MagicMock(StorageClientAsync)
        response = {'rows': [], 'count': 0}
        if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
            _rv = (await mock_coro_response(response))
        else:
            _rv = asyncio.ensure_future(mock_coro_response(response))
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv):
                resp = (await client.get('/fledge/schedule/process/bla'))
                assert (404 == resp.status)
                assert ("No such Scheduled Process: ['bla']." == resp.reason)
    async def test_post_scheduled_process(self, client):
        payload = {'process_name': 'manage', 'script': '["tasks/manage"]'}
        storage_client_mock = MagicMock(StorageClientAsync)
        # Duplicate-name lookup returns empty, insert succeeds.
        response = {'rows': [], 'count': 0}
        ret_val = {'response': 'inserted', 'rows_affected': 1}
        if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
            _rv1 = (await mock_coro_response(response))
            _rv2 = (await mock_coro_response(ret_val))
            _rv3 = (await mock_coro_response(None))
        else:
            _rv1 = asyncio.ensure_future(mock_coro_response(response))
            _rv2 = asyncio.ensure_future(mock_coro_response(ret_val))
            _rv3 = asyncio.ensure_future(mock_coro_response(None))
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv1) as query_tbl_patch:
                with patch.object(storage_client_mock, 'insert_into_tbl', return_value=_rv2) as insert_tbl_patch:
                    with patch.object(server.Server.scheduler, '_get_process_scripts', return_value=_rv3) as get_process_script_patch:
                        resp = (await client.post('/fledge/schedule/process', data=json.dumps(payload)))
                        assert (200 == resp.status)
                        result = (await resp.text())
                        json_response = json.loads(result)
                        assert ({'message': '{} process name created successfully.'.format(payload['process_name'])} == json_response)
                        get_process_script_patch.assert_called_once_with()
                        assert insert_tbl_patch.called
                        (args, kwargs) = insert_tbl_patch.call_args_list[0]
                        assert ('scheduled_processes' == args[0])
                        assert ({'name': 'manage', 'script': '["tasks/manage"]'} == json.loads(args[1]))
                        assert query_tbl_patch.called
                        (args, kwargs) = query_tbl_patch.call_args_list[0]
                        assert ('scheduled_processes' == args[0])
                        assert ({'return': ['name'], 'where': {'column': 'name', 'condition': '=', 'value': 'manage'}} == json.loads(args[1]))
    # NOTE(review): the line below is residue of a stripped
    # @pytest.mark.parametrize decorator for the bad-data cases.
    .parametrize('request_data, response_code, error_message', [({}, 400, 'Missing process_name property in payload.'), ({'process_name': ''}, 400, 'Missing script property in payload.'), ({'script': ''}, 400, 'Missing process_name property in payload.'), ({'processName': '', 'script': ''}, 400, 'Missing process_name property in payload.'), ({'process_name': '', 'script': '["tasks/statistics"]'}, 400, 'Process name cannot be empty.'), ({'process_name': 'new', 'script': ''}, 400, 'Script cannot be empty.'), ({'process_name': ' ', 'script': '["tasks/statistics"]'}, 400, 'Process name cannot be empty.'), ({'process_name': ' new', 'script': ' '}, 400, 'Script cannot be empty.'), ({'process_name': 'purge', 'script': '["tasks/purge"]'}, 400, 'purge process name already exists.')])
    async def test_post_scheduled_process_bad_data(self, client, request_data, response_code, error_message):
        storage_client_mock = MagicMock(StorageClientAsync)
        response = {'rows': [{'name': 'purge'}], 'count': 1}
        if ((sys.version_info.major == 3) and (sys.version_info.minor >= 8)):
            _rv = (await mock_coro_response(response))
        else:
            _rv = asyncio.ensure_future(mock_coro_response(response))
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv):
                resp = (await client.post('/fledge/schedule/process', data=json.dumps(request_data)))
                assert (response_code == resp.status)
                assert (error_message == resp.reason)
                result = (await resp.text())
                json_response = json.loads(result)
                assert ({'message': error_message} == json_response)
def gen_sites():
    """Return one (site_name, site_type) pair from the first PCIE_BOT tile.

    NOTE(review): despite the ``gen_`` name this is not a generator — it
    returns on the very first site of the first matching tile, and
    implicitly returns None when no PCIE_BOT tile exists.  Confirm callers
    expect a single pair rather than an iterable.
    """
    db = Database(util.get_db_root(), util.get_part())
    grid = db.grid()
    # Sorted for deterministic tile selection across runs.
    for tile_name in sorted(grid.tiles()):
        loc = grid.loc_of_tilename(tile_name)
        gridinfo = grid.gridinfo_at_loc(loc)
        if (gridinfo.tile_type not in ['PCIE_BOT']):
            continue
        for (site_name, site_type) in gridinfo.sites.items():
            return (site_name, site_type)
def get_gamatoto_helpers(is_jp: bool) -> Optional[dict[str, Any]]:
    """Download and parse the Gamatoto expedition helper table.

    Returns a mapping of helper id -> {'Rarity_id', 'Rarity_name'}, or
    None when the game file could not be fetched.
    """
    country_code = 'ja' if is_jp else 'en'
    file_data = game_data_getter.get_file_latest('resLocal', f'GamatotoExpedition_Members_name_{country_code}.csv', is_jp)
    if file_data is None:
        helper.error_text('Failed to get gamatoto helper data')
        return None
    helpers: dict[str, Any] = {}
    # Skip the header row; stop at the first row with too few fields.
    for row in file_data.decode('utf-8').splitlines()[1:]:
        fields = row.split(helper.get_text_splitter(is_jp))
        if len(fields) < 5:
            break
        helpers[fields[0]] = {'Rarity_id': int(fields[1]), 'Rarity_name': fields[4]}
    return helpers
class SequentialRolloutRunner(RolloutRunner):
    """Runs rollout episodes sequentially in one process, with a tqdm bar.

    Unlike parallel runners this supports rendering (which requires
    trajectory recording) and writes event logs / trajectory data under
    the current working directory.
    """
    def __init__(self, n_episodes: int, max_episode_steps: int, deterministic: bool, record_trajectory: bool, record_event_logs: bool, render: bool):
        super().__init__(n_episodes=n_episodes, max_episode_steps=max_episode_steps, deterministic=deterministic, record_trajectory=record_trajectory, record_event_logs=record_event_logs)
        # Rendering replays recorded trajectories, so it needs recording on.
        if render:
            assert record_trajectory, 'Rendering is supported only when trajectory recording is enabled.'
        self.render = render
        self.progress_bar = None
    # NOTE(review): the bare expression below looks like residue of a
    # stripped @override(RolloutRunner) decorator.
    (RolloutRunner)
    def run_with(self, env: ConfigType, wrappers: CollectionOfConfigType, agent: ConfigType):
        """Instantiate env/wrappers/agent and run the interaction loop."""
        env_seeds = self.maze_seeding.get_explicit_env_seeds(self.n_episodes)
        agent_seeds = self.maze_seeding.get_explicit_agent_seeds(self.n_episodes)
        (env, agent) = self.init_env_and_agent(env_config=env, wrappers_config=wrappers, input_dir=self.input_dir, max_episode_steps=self.max_episode_steps, agent_config=agent)
        # Console statistics logging for the rollout.
        register_log_stats_writer(LogStatsWriterConsole())
        if (not isinstance(env, LogStatsWrapper)):
            env = LogStatsWrapper.wrap(env, logging_prefix='rollout_data')
        if self.record_event_logs:
            LogEventsWriterRegistry.register_writer(LogEventsWriterTSV(log_dir='./event_logs'))
        if self.record_trajectory:
            TrajectoryWriterRegistry.register_writer(TrajectoryWriterFile(log_dir='./trajectory_data'))
            if (not isinstance(env, TrajectoryRecordingWrapper)):
                env = TrajectoryRecordingWrapper.wrap(env)
        # Fewer explicit seeds than requested episodes caps the episode count.
        actual_number_of_episodes = min(len(env_seeds), self.n_episodes)
        if (actual_number_of_episodes < self.n_episodes):
            BColors.print_colored(f'Only {len(env_seeds)} explicit seed(s) given, thus the number of episodes changed from: {self.n_episodes} to {actual_number_of_episodes}.', BColors.WARNING)
        self.progress_bar = tqdm(desc='Episodes done', unit=' episodes', total=actual_number_of_episodes)
        RolloutRunner.run_interaction_loop(env, agent, actual_number_of_episodes, render=self.render, after_reset_callback=(lambda : self.update_progress()), env_seeds=env_seeds, agent_seeds=agent_seeds, deterministic=self.deterministic)
        self.progress_bar.close()
        env.write_epoch_stats()
    def update_progress(self):
        """Advance the episode progress bar (called after each env reset)."""
        self.progress_bar.update()
def _get_mnist(conf, root, split, transform, target_transform, download):
    """Build a torchvision MNIST dataset for the given split.

    NOTE(review): the incoming ``transform`` argument is discarded and
    rebuilt from ``conf.pn_normalize`` — confirm that is intentional.
    """
    is_train = (split == 'train')
    pipeline = [transforms.ToTensor()]
    if conf.pn_normalize:
        # Standard MNIST mean/std normalization.
        pipeline.append(transforms.Normalize((0.1307,), (0.3081,)))
    transform = transforms.Compose(pipeline)
    return datasets.MNIST(root=root, train=is_train, transform=transform, target_transform=target_transform, download=download)
class AIOKafkaConsumerThreadFixtures():
    """Shared fixtures for AIOKafkaConsumerThread tests.

    NOTE(review): the bare ``()`` lines below appear to be residue of
    stripped ``@pytest.fixture`` decorators.
    """
    ()
    def cthread(self, *, consumer):
        # Thread under test, wrapping the (mocked) consumer fixture.
        return AIOKafkaConsumerThread(consumer)
    ()
    def tracer(self, *, app):
        tracer = app.tracer = Mock(name='tracer')
        tobj = tracer.get_tracer.return_value
        def start_span(operation_name=None, **kwargs):
            # Real opentracing span so attribute assignment behaves normally.
            span = opentracing.Span(tracer=tobj, context=opentracing.SpanContext())
            if (operation_name is not None):
                span.operation_name = operation_name
                assert (span.operation_name == operation_name)
            return span
        tobj.start_span = start_span
        return tracer
    ()
    def _consumer(self):
        # Fully mocked aiokafka consumer with async entry points stubbed.
        return Mock(name='AIOKafkaConsumer', autospec=aiokafka.AIOKafkaConsumer, start=AsyncMock(), stop=AsyncMock(), commit=AsyncMock(), position=AsyncMock(), end_offsets=AsyncMock(), _client=Mock(name='Client', close=AsyncMock()), _coordinator=Mock(name='Coordinator', close=AsyncMock()))
    ()
    def now(self):
        # NOTE(review): returns None; presumably a placeholder clock value.
        return
    ()
    def tp(self):
        return TP('foo', 30)
    ()
    def aiotp(self, *, tp):
        # aiokafka TopicPartition mirroring the faust TP fixture.
        return TopicPartition(tp.topic, tp.partition)
    ()
    def logger(self, *, cthread):
        cthread.log = Mock(name='cthread.log')
        return cthread.log
class fartBinForm(QDialog, Ui_FartBinDialog):
    """Qt dialog for repairing a dumped dex file using its .bin method data.

    Offers file pickers for the .dex and .bin inputs and two repair modes:
    a threaded in-app repair (``submit``) and an external dexfixer.jar run
    (``submitJar``).
    """
    def __init__(self, parent=None):
        super(fartBinForm, self).__init__(parent)
        self.setupUi(self)
        self.setWindowOpacity(0.93)
        # Wire up the dialog's buttons.
        self.btnSubmit.clicked.connect(self.submit)
        self.btnSelectBinPath.clicked.connect(self.selectBinPath)
        self.btnSelectDexPath.clicked.connect(self.selectDexPath)
        self.btnSubmitJar.clicked.connect(self.submitJar)
        self.examplePath = (os.getcwd() + '/example/')
        self._translate = QtCore.QCoreApplication.translate
    def selectBinPath(self):
        """Pick the .bin input file and show it in the path box."""
        (fileName_choose, filetype) = QFileDialog.getOpenFileName(self, 'select file', self.examplePath, 'Bin Files (*.bin);;All Files (*)')
        if (fileName_choose == ''):
            return
        self.txtBinPath.setText(fileName_choose)
    def selectDexPath(self):
        """Pick the .dex input file and show it in the path box."""
        (fileName_choose, filetype) = QFileDialog.getOpenFileName(self, 'select file', self.examplePath, 'Dex Files (*.dex);;All Files (*)')
        if (fileName_choose == ''):
            return
        self.txtDexPath.setText(fileName_choose)
    def appendLog(self, data):
        # Slot receiving log lines from the worker thread.
        self.txtResult.appendPlainText(data)
    def submit(self):
        """Validate both paths, then run the repair in a background thread."""
        if ((len(self.txtDexPath.text()) <= 0) or (os.path.exists(self.txtDexPath.text()) == False)):
            QMessageBox().information(self, 'hint', self._translate('fartBinForm', 'dex'))
            return
        if ((len(self.txtBinPath.text()) <= 0) or (os.path.exists(self.txtBinPath.text()) == False)):
            QMessageBox().information(self, 'hint', self._translate('fartBinForm', 'bin'))
            return
        # Keep a reference on self so the thread isn't garbage collected.
        self.th = FartThread(self.txtDexPath.text(), self.txtBinPath.text())
        self.th.loggerSignel.connect(self.appendLog)
        self.th.start()
    def submitJar(self):
        """Validate both paths, then repair via the bundled dexfixer.jar."""
        if ((len(self.txtDexPath.text()) <= 0) or (os.path.exists(self.txtDexPath.text()) == False)):
            QMessageBox().information(self, 'hint', self._translate('fartBinForm', 'dex'))
            return
        if ((len(self.txtBinPath.text()) <= 0) or (os.path.exists(self.txtBinPath.text()) == False)):
            QMessageBox().information(self, 'hint', self._translate('fartBinForm', 'bin'))
            return
        (filepath, fileext) = os.path.splitext(self.txtDexPath.text())
        # Output is written next to the input as <name>_repair<ext>.
        cmd = ('java -jar ./exec/dexfixer.jar %s %s %s' % (self.txtDexPath.text(), self.txtBinPath.text(), ((filepath + '_repair') + fileext)))
        res = CmdUtil.exec(cmd)
        if ('error' in res):
            QMessageBox().information(self, 'hint', (self._translate('fartBinForm', ',') + res))
            return
        QMessageBox().information(self, 'hint', res)
class MatrixBase(ufl.Matrix):
    """Assembled bilinear-form matrix wrapper.

    *a* is either a UFL form or a (test, trial) argument pair; *bcs* are
    the boundary conditions applied at assembly; *mat_type* the matrix
    backend type.

    NOTE(review): ``has_bcs`` and the two ``bcs`` definitions look like a
    stripped ``@property`` / ``@bcs.setter`` set — as written, later plain
    defs shadow the earlier ones.  Confirm against upstream before relying
    on attribute-style access.
    """
    def __init__(self, a, bcs, mat_type):
        if isinstance(a, tuple):
            # Argument-pair case: no form is kept, only its arguments.
            self.a = None
            (test, trial) = a
            arguments = a
        else:
            self.a = a
            (test, trial) = a.arguments()
            arguments = None
        ufl.Matrix.__init__(self, test.function_space(), trial.function_space())
        self._arguments = arguments
        self.bcs = bcs
        self.comm = test.function_space().comm
        # Internal reference-counted duplicate of the communicator.
        self._comm = internal_comm(self.comm)
        self.block_shape = (len(test.function_space()), len(trial.function_space()))
        self.mat_type = mat_type
    def arguments(self):
        """Return the (test, trial) arguments of the wrapped form."""
        if self.a:
            return self.a.arguments()
        else:
            return self._arguments
    def __del__(self):
        # Drop our reference to the internal communicator duplicate.
        if hasattr(self, '_comm'):
            decref(self._comm)
    def has_bcs(self):
        # True when any boundary conditions are attached.
        return (self._bcs != ())
    def bcs(self):
        # Getter for the applied boundary conditions.
        return self._bcs
    def bcs(self, bcs):
        # Setter: flatten and normalize bcs into a tuple (None -> ()).
        if (bcs is not None):
            self._bcs = tuple(itertools.chain(*(as_tuple(bc) for bc in bcs)))
        else:
            self._bcs = ()
    def __repr__(self):
        return ('%s(a=%r, bcs=%r)' % (type(self).__name__, self.a, self.bcs))
    def __str__(self):
        return ('assembled %s(a=%s, bcs=%s)' % (type(self).__name__, self.a, self.bcs))
class LinkedinUser(scrapy.Item):
    """Scrapy item holding one scraped LinkedIn profile."""
    # Name and locale fields.
    lastName = scrapy.Field()
    firstName = scrapy.Field()
    locale = scrapy.Field()
    headline = scrapy.Field()
    linkedinUrl = scrapy.Field()
    # Outreach / contact data.
    connection_msg = scrapy.Field()
    email_address = scrapy.Field()
    phone_numbers = scrapy.Field()
    # Profile sections.
    education = scrapy.Field()
    experience = scrapy.Field()
    industryName = scrapy.Field()
    geoLocationName = scrapy.Field()
class OptionPlotoptionsVariablepieSonificationContexttracksMappingGapbetweennotes(Options):
    """Generated option proxy for the variablepie sonification
    ``...contextTracks.mapping.gapBetweenNotes`` configuration node.

    NOTE(review): each getter/setter pair shares a name — the ``@property``
    / ``@<name>.setter`` decorators appear to have been stripped; as
    written, the setter def shadows the getter.
    """
    def mapFunction(self):
        # Getter: configured mapping function (defaults to None).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: target property the value maps to.
        return self._config_get(None)
    def mapTo(self, text: str):
        # Setter.
        self._config(text, js_type=False)
    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def within(self):
        # Getter: data scope the min/max are computed within.
        return self._config_get(None)
    def within(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
class GUIServerController(ControllerBase):
    """WSGI controller serving the topology GUI's static files."""
    def __init__(self, req, link, data, **config):
        super(GUIServerController, self).__init__(req, link, data, **config)
        # Serve everything under the bundled html/ directory.
        path = ('%s/html/' % PATH)
        self.static_app = DirectoryApp(path)
    # NOTE(review): the tuple below looks like residue of a stripped
    # @route('topology', '/{filename:[^/]*}') decorator.
    ('topology', '/{filename:[^/]*}')
    def static_handler(self, req, **kwargs):
        """Serve the requested file (or the directory index) from html/."""
        if kwargs['filename']:
            req.path_info = kwargs['filename']
        return self.static_app(req)
class T(configparser.ConfigParser):
    """Application preferences store backed by configparser.

    Seeds itself with built-in defaults, then overlays the user's config
    file.  "List sections" (formula_path, map_path, recent_files, ...)
    exploit ``allow_no_value``: each list entry is stored as a valueless
    option key.

    NOTE(review): ``get_data_path`` and ``find_resource`` take no ``self``
    and are called via ``T.<name>(...)`` — presumably stripped
    ``@staticmethod`` decorators; confirm before calling on instances.
    """
    def __init__(self, file):
        _shared_formula_dir = T.get_data_path('formulas')
        _shared_map_dir = T.get_data_path('maps')
        comp = 'gcc'
        _defaults = OrderedDict((('compiler', OrderedDict((('name', comp), ('options', self.get_default_compiler_options())))), ('optimize', OrderedDict((('peephole', '1'),))), ('main_window', OrderedDict((('width', '933'), ('height', '594')))), ('display', OrderedDict((('width', '640'), ('height', '480'), ('antialias', '1'), ('autodeepen', '1'), ('autotolerance', '1')))), ('helpers', OrderedDict((('editor', self.get_default_editor()), ('mailer', self.get_default_mailer()), ('browser', self.get_default_browser())))), ('general', OrderedDict((('threads', '1'), ('compress_fct', '1'), ('data_dir', os.path.expandvars('${HOME}/gnofract4d')), ('cache_dir', os.path.expandvars('${HOME}/.gnofract4d-cache'))))), ('user_info', OrderedDict((('name', ''), ('nsid', '')))), ('blogs', OrderedDict()), ('formula_path', OrderedDict.fromkeys(('formulas', _shared_formula_dir, os.path.expandvars('${HOME}/gnofract4d/formulas')))), ('map_path', OrderedDict.fromkeys(('maps', _shared_map_dir, os.path.expandvars('${HOME}/gnofract4d/maps'), '/usr/share/gimp/2.0/gradients'))), ('recent_files', OrderedDict()), ('ignored', OrderedDict()), ('director', OrderedDict((('fct_enabled', '0'), ('fct_dir', '/tmp'), ('png_dir', '/tmp'))))))
        # Sections whose changes require the image to be redrawn.
        self.image_changed_sections = {'display': True, 'compiler': True}
        configparser.ConfigParser.__init__(self, allow_no_value=True, interpolation=None)
        self.read_dict(_defaults)
        self.file = os.path.expanduser(file)
        # Overlay user settings; read() silently skips a missing file.
        self.read(self.file)
        # Migrate legacy "name=path" entries to valueless list entries.
        self.update_paths('formula_path')
        self.update_paths('map_path')
        self.ensure_contains('formula_path', _shared_formula_dir)
        self.ensure_contains('map_path', _shared_map_dir)
    def update_paths(self, section):
        # Convert legacy "key=path" options into valueless "path" keys.
        for (key, value) in self[section].items():
            if (value is not None):
                self[section][value] = None
                del self[section][key]
    def ensure_contains(self, section, required_item):
        """Append *required_item* to the list section if not already present."""
        l = self.get_list(section)
        if (not l.count(required_item)):
            l.append(required_item)
            self.set_list(section, l)
    def get_data_path(subpath=''):
        # Resolve the shared data directory relative to this module.
        moduledir = os.path.dirname(sys.modules[__name__].__file__)
        path = os.path.normpath(os.path.join(moduledir, '../../../../share/gnofract4d', subpath))
        return path
    def find_resource(name, resource_dir):
        """Look for *name* in the local resource_dir, then the shared data dir.

        Returns the shared path even when the file does not exist there.
        """
        local_name = os.path.join(resource_dir, name)
        if os.path.exists(local_name):
            return local_name
        full_name = os.path.join(T.get_data_path(resource_dir), name)
        if os.path.exists(full_name):
            return full_name
        return full_name
    def get_default_editor(self):
        return 'emacs'
    def get_default_mailer(self):
        return 'evolution %s'
    def get_default_browser(self):
        return 'firefox %s'
    def get_default_compiler_options(self):
        return '-fPIC -DPIC -O2 -shared -ffast-math'
    def optionxform(self, option):
        # Preserve option-name case (default configparser lowercases).
        return str(option)
    def set(self, section, key, val):
        """Set a value, firing the changed() hook only on real changes."""
        if (self.has_section(section) and self.has_option(section, key) and (self.get(section, key) == val)):
            return
        configparser.ConfigParser.set(self, section, key, val)
        self.changed(section)
    def set_size(self, width, height):
        """Update the display size, firing changed() once for both values."""
        if ((self.getint('display', 'height') == height) and (self.getint('display', 'width') == width)):
            return
        configparser.ConfigParser.set(self, 'display', 'height', str(height))
        configparser.ConfigParser.set(self, 'display', 'width', str(width))
        self.changed('display')
    def set_main_window_size(self, width, height):
        """Update the main window size, firing changed() once for both values."""
        if ((self.getint('main_window', 'height') == height) and (self.getint('main_window', 'width') == width)):
            return
        configparser.ConfigParser.set(self, 'main_window', 'height', str(height))
        configparser.ConfigParser.set(self, 'main_window', 'width', str(width))
        self.changed('main_window')
    def get_list(self, name):
        # List sections store items as option keys.
        return list(self[name])
    def remove_all_in_list_section(self, name):
        for item in self[name]:
            self.remove_option(name, item)
    def set_list(self, name, list):
        """Replace the whole list section with *list*, then fire changed()."""
        self.remove_all_in_list_section(name)
        for item in list:
            configparser.ConfigParser.set(self, name, item, None)
        self.changed(name)
    def changed(self, section):
        # Hook for subclasses/UI; intentionally a no-op here.
        pass
    def save(self):
        """Write the current settings back to the user's config file."""
        with open(self.file, 'w') as f:
            self.write(f)
class OptionSeriesItemSonificationContexttracksMappingTime(Options):
    """Generated option proxy for the per-series sonification
    ``...contextTracks.mapping.time`` configuration node.

    NOTE(review): each getter/setter pair shares a name — the ``@property``
    / ``@<name>.setter`` decorators appear to have been stripped; as
    written, the setter def shadows the getter.
    """
    def mapFunction(self):
        # Getter: configured mapping function (defaults to None).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: target property the value maps to.
        return self._config_get(None)
    def mapTo(self, text: str):
        # Setter.
        self._config(text, js_type=False)
    def max(self):
        # Getter: upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def min(self):
        # Getter: lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def within(self):
        # Getter: data scope the min/max are computed within.
        return self._config_get(None)
    def within(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
('tomate.plugin', scope=SingletonScope)
class PluginEngine():
    """Plugin engine: discovers, configures and (de)activates plugins,
    persisting activation state through the app's config parser.
    """
    # NOTE(review): the parenthesized keyword list below is residue of a
    # stripped dependency-injection decorator.
    (bus='tomate.bus', config='tomate.config', graph=Graph)
    def __init__(self, bus: Bus, config: Config, graph: Graph):
        self._bus = bus
        self._graph = graph
        logger.debug('action=init paths=%s', config.plugin_paths())
        # Versioned manager wrapped so activation state is saved to config.
        self._plugin_manager = ConfigurablePluginManager(decorated_manager=VersionedPluginManager())
        self._plugin_manager.setPluginPlaces(config.plugin_paths())
        self._plugin_manager.setPluginInfoExtension('plugin')
        self._plugin_manager.setConfigParser(config.parser, config.save)
    def collect(self) -> None:
        """Locate and load all plugins, configuring each after load."""
        logger.debug('action=collect')
        self._plugin_manager.locatePlugins()
        self._plugin_manager.loadPlugins(callback_after=self._configure_plugin)
    def _configure_plugin(self, plugin: PluginInfo) -> None:
        # Only configure plugins that loaded without errors.
        if (plugin.error is None):
            plugin.plugin_object.configure(self._bus, self._graph)
    def deactivate(self, name: str) -> None:
        self._plugin_manager.deactivatePluginByName(name)
    def activate(self, name: str) -> None:
        self._plugin_manager.activatePluginByName(name)
    def all(self) -> List[PluginInfo]:
        """Return every known plugin, sorted by name."""
        logger.debug('action=all')
        return sorted(self._plugin_manager.getAllPlugins(), key=(lambda info: info.name))
    def lookup(self, name: str, category='Default') -> Optional[PluginInfo]:
        """Return the plugin named *name* in *category*, or None."""
        logger.debug('action=lookup name=%s category=%s', name, category)
        return self._plugin_manager.getPluginByName(name, category)
    def has_plugins(self) -> bool:
        """True when at least one plugin is known to the engine."""
        has = (len(self.all()) > 0)
        logger.debug('action=has_plugin has=%s', has)
        return has
    def remove(self, plugin: object, category='Default') -> None:
        self._plugin_manager.removePluginFromCategory(plugin, category)
class CustomBackend(ModelBackend):
    """Authentication backend allowing login by username OR email address."""

    def authenticate(self, request, username=None, password=None, **kwargs):
        """Return the matching user when the password checks out, else None."""
        candidate = User.objects.filter(Q(username=username) | Q(email=username)).first()
        if candidate and candidate.check_password(password):
            return candidate
        return None
def _discover_dependencies(test: Union[Metric, Test]) -> Iterator[Tuple[str, Union[Metric, Test]]]:
    """Yield (attribute_name, dependency) pairs for a Metric/Test object.

    If the object defines a ``__evidently_dependencies__`` hook, its output is
    yielded verbatim and nothing else is inspected. Otherwise every instance
    attribute whose value is itself a Metric or Test is yielded.
    """
    if hasattr(test, '__evidently_dependencies__'):
        yield from test.__evidently_dependencies__()
        return
    for field_name, field in test.__dict__.items():
        # isinstance() is the idiomatic, equivalent form of the original
        # issubclass(type(field), ...) check.
        if isinstance(field, (Metric, Test)):
            yield field_name, field
# NOTE(review): this bare call looks like residue of a stripped decorator,
# presumably Django's @override_settings(TIME_ZONE='UTC', USE_TZ=True).
_settings(TIME_ZONE='UTC', USE_TZ=True)
class TestDefaultTZDateTimeField(TestCase):
    """DRF DateTimeField should use the active Django timezone, default UTC."""

    def setup_class(cls):
        # pytest-style class setup; a @classmethod decorator was probably
        # stripped during extraction.
        cls.field = serializers.DateTimeField()
        cls.kolkata = ZoneInfo('Asia/Kolkata')

    def assertUTC(self, tzinfo):
        # Accepts pytz's utc singleton as well as any tzinfo whose
        # key (zoneinfo) or zone (pytz) attribute names UTC.
        assert ((tzinfo is utc) or ((getattr(tzinfo, 'key', None) or getattr(tzinfo, 'zone', None)) == 'UTC'))

    def test_default_timezone(self):
        self.assertUTC(self.field.default_timezone())

    def test_current_timezone(self):
        # default_timezone() tracks the currently activated timezone and
        # falls back to UTC once deactivated.
        self.assertUTC(self.field.default_timezone())
        activate(self.kolkata)
        assert (self.field.default_timezone() == self.kolkata)
        deactivate()
        self.assertUTC(self.field.default_timezone())
# NOTE(review): residue of a stripped decorator, presumably
# @pytest.mark.skipif(...); not valid Python as written.
.skipif((arg_chip in ['esp8266', 'esp32']), reason='get_security_info command is supported on ESP32S2 and later')
class TestSecurityInfo(EsptoolTestCase):
    """Exercises the `get_security_info` esptool command on real hardware."""

    def test_show_security_info(self):
        res = self.run_esptool('get_security_info')
        assert ('Flags' in res)
        assert ('Crypt Count' in res)
        assert ('Key Purposes' in res)
        if (arg_chip != 'esp32s2'):
            # ESP32-S2 lacks chip-ID/API-version fields; for other chips,
            # connect to read the expected chip ID and compare.
            try:
                esp = esptool.get_default_connected_device([arg_port], arg_port, 10, 115200, arg_chip)
                assert (f'Chip ID: {esp.IMAGE_CHIP_ID}' in res)
                assert ('API Version' in res)
            finally:
                # Always release the serial port, even on assertion failure.
                esp._port.close()
        assert ('Secure Boot' in res)
        assert ('Flash Encryption' in res)
class TestCoprUpdatePermissions(CoprsTestCase):
    """Tests for the /coprs/<user>/<copr>/update_permissions/ endpoint.

    NOTE(review): the bare `('u2')` / `('u1')` lines before each test look
    like residue of stripped login/transaction decorators naming the acting
    user — verify against the original project.
    """
    ('u2')
    def test_cancel_permission(self, f_users, f_coprs, f_copr_permissions, f_db):
        # Posting '0' (nothing) for copr_builder_1 should drop the previously
        # approved state, so the "approved"-selected widget must be gone.
        self.db.session.add_all([self.u2, self.c2])
        r = self.test_client.post('/coprs/{0}/{1}/update_permissions/'.format(self.u2.name, self.c2.name), data={'copr_builder_1': '0'}, follow_redirects=True)
        check_string = '<select id="copr_builder_1" name="copr_builder_1"><option value="0">nothing</option><option value="1">request</option><option selected value="2">approved</option></select>'
        assert (check_string.encode('utf-8') not in r.data)
    ('u2')
    def test_update_more_permissions(self, f_users, f_coprs, f_copr_permissions, f_db):
        # Update several users' builder/admin permissions in one POST, then
        # verify the persisted enum values per user.
        self.db.session.add_all([self.u2, self.c3])
        self.test_client.post('/coprs/{0}/{1}/update_permissions/'.format(self.u2.name, self.c3.name), data={'copr_builder_1': '2', 'copr_admin_1': '1', 'copr_admin_3': '2'}, follow_redirects=True)
        # Re-attach detached instances to the fresh session before querying.
        self.u1 = self.db.session.merge(self.u1)
        self.u3 = self.db.session.merge(self.u3)
        self.c3 = self.db.session.merge(self.c3)
        u1_c3_perms = self.models.CoprPermission.query.filter((self.models.CoprPermission.copr_id == self.c3.id)).filter((self.models.CoprPermission.user_id == self.u1.id)).first()
        assert (u1_c3_perms.copr_builder == self.helpers.PermissionEnum('approved'))
        assert (u1_c3_perms.copr_admin == self.helpers.PermissionEnum('request'))
        u3_c3_perms = self.models.CoprPermission.query.filter((self.models.CoprPermission.copr_id == self.c3.id)).filter((self.models.CoprPermission.user_id == self.u3.id)).first()
        assert (u3_c3_perms.copr_builder == self.helpers.PermissionEnum('nothing'))
        assert (u3_c3_perms.copr_admin == self.helpers.PermissionEnum('approved'))
    ('u1')
    def test_copr_admin_can_update_permissions(self, f_users, f_coprs, f_copr_permissions, f_db):
        self.db.session.add_all([self.u2, self.c3])
        r = self.test_client.post('/coprs/{0}/{1}/update_permissions/'.format(self.u2.name, self.c3.name), data={'copr_builder_1': '2', 'copr_admin_3': '2'}, follow_redirects=True)
        assert (b'Project permissions were updated' in r.data)
    ('u1')
    def test_copr_admin_can_give_up_his_permissions(self, f_users, f_coprs, f_copr_permissions, f_db):
        # An admin downgrades their own permission to 'request' (enum 1).
        self.db.session.add_all([self.u2, self.c3, self.cp2, self.cp3])
        r = self.test_client.post('/coprs/{0}/{1}/update_permissions/'.format(self.u2.name, self.c3.name), data={'copr_admin_1': '1', 'copr_admin_3': '1'}, follow_redirects=True)
        self.u1 = self.db.session.merge(self.u1)
        self.c3 = self.db.session.merge(self.c3)
        perm = self.models.CoprPermission.query.filter((self.models.CoprPermission.user_id == self.u1.id)).filter((self.models.CoprPermission.copr_id == self.c3.id)).first()
        assert (perm.copr_admin == 1)
        assert (b'Project permissions were updated' in r.data)
class Bits(OctetString):
    """An OctetString whose octets are interpreted as named bit flags.

    Bit 0 is the most significant bit of the first octet (SNMP BITS
    convention).
    """
    # Default empty name<->bit mapping; subclasses override via withNamedBits.
    namedValues = namedval.NamedValues()

    def __new__(cls, *args, **kwargs):
        # Passing namedValues= at construction time creates an ad-hoc
        # subclass carrying that mapping, then instantiates it.
        if ('namedValues' in kwargs):
            Bits = cls.withNamedBits(**kwargs.pop('namedValues'))
            return Bits(*args, **kwargs)
        return OctetString.__new__(cls)

    def prettyIn(self, bits):
        """Convert a sequence of bit names into the packed octet value.

        Non-sequence input is delegated to OctetString.prettyIn unchanged.
        Raises error.ProtocolError for a name missing from namedValues.
        """
        if (not isinstance(bits, (tuple, list))):
            return OctetString.prettyIn(self, bits)
        octets = []
        for bit in bits:
            v = self.namedValues.getValue(bit)
            if (v is None):
                raise error.ProtocolError(('Unknown named bit %s' % bit))
            (d, m) = divmod(v, 8)
            # Grow the octet buffer on demand, then set bit m (MSB-first).
            if (d >= len(octets)):
                octets.extend(([0] * ((d - len(octets)) + 1)))
            octets[d] |= (1 << (7 - m))
        return OctetString.prettyIn(self, octets)

    def prettyOut(self, value):
        """Render the packed value as a comma-separated list of bit names."""
        names = []
        ints = self.__class__(value).asNumbers()
        for (i, v) in enumerate(ints):
            v = ints[i]
            # Scan each octet MSB-first; bit index is i*8 + (7 - j).
            j = 7
            while (j >= 0):
                if (v & (1 << j)):
                    name = self.namedValues.getName((((i * 8) + 7) - j))
                    if (name is None):
                        name = ('UnknownBit-%s' % ((((i * 8) + 7) - j),))
                    names.append(name)
                j -= 1
        return ', '.join([str(x) for x in names])

    def withNamedBits(cls, **values):
        """Return a subclass with `values` merged into namedValues.

        NOTE(review): takes `cls` and is called on the class in __new__; a
        @classmethod decorator was probably stripped during extraction.
        """
        enums = set(cls.namedValues.items())
        enums.update(values.items())
        class X(cls):
            namedValues = namedval.NamedValues(*enums)
        X.__name__ = cls.__name__
        return X
def main(server, username, startpage, fromuts, touts, outfile, infotype='recenttracks'):
    """Download a user's track pages from `server` and append them to `outfile`.

    :param server: API server to query
    :param username: account whose tracks are fetched
    :param startpage: first page number to request
    :param fromuts/touts: unix-timestamp range passed through to get_tracks
    :param outfile: path appended to with the downloaded tracks
    :param infotype: 'recenttracks' (deduplicated by timestamp) or another
        track type (kept in arrival order)

    Whatever happens (including a ValueError turned into exit()), the tracks
    collected so far are flushed to `outfile` in the `finally` block.
    """
    trackdict = dict()
    page = startpage
    totalpages = -1
    n = 0
    try:
        for (page, totalpages, tracks) in get_tracks(server, username, fromuts, touts, startpage, tracktype=infotype):
            print('Got page %s of %s..' % (page, totalpages))
            for track in tracks:
                if infotype == 'recenttracks':
                    # Keyed by timestamp (track[0]) so overlapping pages
                    # cannot produce duplicates.
                    trackdict.setdefault(track[0], track)
                else:
                    # No natural key: keep every track in arrival order.
                    n += 1
                    trackdict.setdefault(n, track)
    except ValueError as e:
        exit(e)
    # (Removed a redundant `except Exception: raise` clause: unhandled
    # exceptions propagate anyway and the finally block still runs.)
    finally:
        with open(outfile, 'a') as outfileobj:
            tracks = sorted(trackdict.values(), reverse=True)
            write_tracks(tracks, outfileobj)
        print('Wrote page %s-%s of %s to file %s' % (startpage, page, totalpages, outfile))
class ipv6_src(oxm):
    """OXM TLV for the IPv6 source address match field (loxigen-generated
    OpenFlow code).

    NOTE(review): the numeric constants on `type_len` and in unpack()'s
    assert were lost during extraction (the lines are syntactically
    incomplete); in the generated source they hold the OXM header value.
    """
    type_len =

    def __init__(self, value=None):
        if (value != None):
            self.value = value
        else:
            # Default: all-zero 16-byte IPv6 address.
            self.value = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        return

    def pack(self):
        """Serialize as 4-byte OXM header followed by the 16-byte address."""
        packed = []
        packed.append(struct.pack('!L', self.type_len))
        packed.append(struct.pack('!16s', self.value))
        return ''.join(packed)

    def unpack(reader):
        # Static-style parser (loxi convention; a @staticmethod decorator was
        # probably stripped): reads and validates the header, then the value.
        obj = ipv6_src()
        _type_len = reader.read('!L')[0]
        assert (_type_len == )
        obj.value = reader.read('!16s')[0]
        return obj

    def __eq__(self, other):
        # Equal iff same concrete class and same address value.
        if (type(self) != type(other)):
            return False
        if (self.value != other.value):
            return False
        return True

    def pretty_print(self, q):
        """Write a human-readable rendering to the pretty-printer `q`."""
        q.text('ipv6_src {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('value = ')
                q.text(util.pretty_ipv6(self.value))
            q.breakable()
        q.text('}')
def run_line_search(f, df, get_p, x0, alpha_0=1, alpha_max=1):
    """Iteratively minimize `f` from `x0` using Wolfe line searches.

    :param f: objective function
    :param df: gradient of f
    :param get_p: callable returning the search direction at a point
    :param x0: starting point
    :param alpha_0: unused here — TODO confirm it was meant to seed alpha_guess
    :param alpha_max: upper bound for the initial step-length guess
    Runs at most 50 iterations, stopping when the gradient norm drops
    below 1e-4.
    """
    x = x0
    alpha_prev = None
    gradients = list()
    step_dirs = list()
    for i in range(50):
        f_0 = f(x)
        grad = np.array(df(x))
        norm = np.linalg.norm(grad)
        print(f'{i:02d} norm(grad)={norm:.6f}')
        if (norm < 0.0001):
            print('Converged!')
            break
        p = get_p(x)
        gradients.append(grad)
        step_dirs.append(p)
        alpha_guess = 1
        if alpha_prev:
            # First-order step-length interpolation from the previous
            # iteration (Nocedal & Wright-style initial guess), capped at
            # alpha_max. Skipped on the first iteration (alpha_prev is None).
            numer = gradients[(- 2)].dot(step_dirs[(- 2)])
            denom = gradients[(- 1)].dot(step_dirs[(- 1)])
            alpha_guess = ((alpha_prev * numer) / denom)
            alpha_guess = min(alpha_max, alpha_guess)
            print(f'  using alpha_guess={alpha_guess:.6f}')
            assert (alpha_guess > 0)
        kwargs = {'f': f, 'df': df, 'x0': x, 'p': p, 'f0': f_0, 'g0': grad, 'alpha_init': alpha_guess}
        # Wolfe-conditions line search supplies the accepted step length.
        (alpha, f_, df_) = wolfe(**kwargs)
        print(f'  alpha={alpha:.6f}')
        x = (x + (alpha * p))
        alpha_prev = alpha
class Solution():
    """LeetCode 951: flip-equivalent binary trees."""

    def flipEquiv(self, root1: 'TreeNode', root2: 'TreeNode') -> bool:
        """Return True if the two trees are equal up to swapping the left and
        right children of any number of nodes.

        Annotations are string forward references so the method does not
        require TreeNode to be resolvable at class-definition time.
        """
        # Both empty: trivially equivalent; exactly one empty: not.
        if root1 is None and root2 is None:
            return True
        if root1 is None or root2 is None:
            return False
        if root1.val != root2.val:
            return False
        # Children match either in place or after a flip at this node;
        # short-circuit so the flipped comparison only runs when needed.
        if self.flipEquiv(root1.left, root2.left) and self.flipEquiv(root1.right, root2.right):
            return True
        return self.flipEquiv(root1.right, root2.left) and self.flipEquiv(root1.left, root2.right)
class TestNursingTask(FrappeTestCase):
    """Documents that submitting/starting clinical documents is blocked while
    nursing tasks from an attached checklist template are still pending."""

    def setUp(self) -> None:
        # Insert the checklist fixtures and force validation on so every
        # test hits the pending-task check.
        nursing_checklist_templates = frappe.get_test_records('Nursing Checklist Template')
        self.activity = frappe.get_doc(nursing_checklist_templates[0]).insert(ignore_if_duplicate=True)
        self.nc_template = frappe.get_doc(nursing_checklist_templates[1]).insert(ignore_if_duplicate=True)
        self.settings = frappe.get_single('Healthcare Settings')
        self.settings.validate_nursing_checklists = 1
        self.settings.save()
        (self.patient, self.practitioner) = create_healthcare_docs()

    def test_lab_test_submission_should_validate_pending_nursing_tasks(self):
        self.lt_template = create_lab_test_template()
        self.lt_template.nursing_checklist_template = self.nc_template.name
        self.lt_template.save()
        lab_test = create_lab_test(self.lt_template)
        lab_test.descriptive_test_items[0].result_value = 12
        lab_test.descriptive_test_items[1].result_value = 1
        lab_test.descriptive_test_items[2].result_value = 2.3
        lab_test.save()
        # Submission is refused until the nursing tasks are completed.
        self.assertRaises(frappe.ValidationError, lab_test.submit)
        complete_nusing_tasks(lab_test)
        lab_test.submit()

    def test_start_clinical_procedure_should_validate_pending_nursing_tasks(self):
        procedure_template = create_clinical_procedure_template()
        procedure_template.allow_stock_consumption = 1
        procedure_template.pre_op_nursing_checklist_template = self.nc_template.name
        procedure_template.save()
        procedure = create_procedure(procedure_template, self.patient, self.practitioner)
        self.assertRaises(frappe.ValidationError, procedure.start_procedure)
        complete_nusing_tasks(procedure)
        procedure.start_procedure()

    def test_admit_inpatient_should_validate_pending_nursing_tasks(self):
        self.settings.allow_discharge_despite_unbilled_services = 1
        self.settings.save()
        ip_record = create_inpatient(self.patient)
        ip_record.admission_nursing_checklist_template = self.nc_template.name
        ip_record.expected_length_of_stay = 0
        ip_record.save(ignore_permissions=True)
        NursingTask.create_nursing_tasks_from_template(ip_record.admission_nursing_checklist_template, ip_record, start_time=now_datetime())
        service_unit = get_healthcare_service_unit()
        kwargs = {'inpatient_record': ip_record, 'service_unit': service_unit, 'check_in': now_datetime()}
        self.assertRaises(frappe.ValidationError, admit_patient, **kwargs)
        complete_nusing_tasks(ip_record)
        admit_patient(**kwargs)
        ip_record.discharge_nursing_checklist_template = self.nc_template.name
        ip_record.save()
        # NOTE(review): this passes admission_... although the discharge
        # template was just set; both hold the same value here so the test
        # still works, but `discharge_nursing_checklist_template` was
        # probably intended — verify.
        NursingTask.create_nursing_tasks_from_template(ip_record.admission_nursing_checklist_template, ip_record, start_time=now_datetime())
        self.assertRaises(frappe.ValidationError, discharge_patient, inpatient_record=ip_record)
        complete_nusing_tasks(ip_record)
        discharge_patient(ip_record)

    def test_submit_therapy_session_should_validate_pending_nursing_tasks(self):
        therapy_type = create_therapy_type()
        therapy_type.nursing_checklist_template = self.nc_template.name
        therapy_type.save()
        therapy_plan = create_therapy_plan()
        therapy_session = create_therapy_session(self.patient, therapy_type.name, therapy_plan.name)
        self.assertRaises(frappe.ValidationError, therapy_session.submit)
        complete_nusing_tasks(therapy_session)
        therapy_session.submit()
def test_registering_to_multiple_types_with_lists(prepare_publishers):
    """Publishers registered with a list of types run for each listed type.

    NOTE(review): the bare `(['Test', ...])` lines look like residue of
    stripped registration decorators; func4 has no residue line but the
    assertions show it runs for every type, so it presumably carried a
    register-for-all decorator — verify against the original project.
    """
    called = []
    (['Test', 'Test2'])
    def func1():
        called.append('func1')
    (['Test', 'Test3'])
    def func2():
        called.append('func2')
    (['Test2', 'Test3'])
    def func3():
        called.append('func3')
    def func4():
        called.append('func4')
    # Nothing has been published yet.
    assert (called == [])
    called = []
    run_publishers('Test')
    assert (called == ['func4', 'func1', 'func2'])
    called = []
    run_publishers('Test2')
    assert (called == ['func4', 'func1', 'func3'])
    called = []
    run_publishers('Test3')
    assert (called == ['func4', 'func2', 'func3'])
class TriggerObject():
    """A keyword trigger for a chat bot: matches incoming messages and sends
    one of its canned responses.

    Construction is keyword-only via **kwargs; unspecified options fall back
    to the defaults visible below.
    """

    def __init__(self, **kwargs) -> None:
        self.trigger_name = kwargs.get('name')
        self.trigger = kwargs.get('trigger', None)
        self.responses = kwargs.get('responses', None)
        self.owner = kwargs.get('owner', None)
        self.guild = kwargs.get('guild', None)
        self.cooldown = kwargs.get('cooldown', 0)  # seconds; 0 disables
        self.timestamp = None  # last time the trigger fired (UTC)
        self.uses = kwargs.get('uses', 0)
        self.toggle = kwargs.get('toggle', False)
        self.case_sensitive = kwargs.get('case_sensitive', True)
        self.word_boundary = kwargs.get('word_boundary', False)
        self.embed_search = kwargs.get('embed_search', False)
        self.pattern = None  # lazily-compiled regex (word/embed search)

    def check(self, message):
        """Return True if `message` should fire this trigger.

        Honours the toggle, case sensitivity, the cooldown window, and the
        word-boundary / embed-search matching modes.
        """
        if (not self.toggle):
            return False
        trigger = self.trigger
        content = message.content
        if (not self.case_sensitive):
            trigger = trigger.lower()
            content = content.lower()
        if (self.cooldown > 0):
            if (self.timestamp is None):
                self.timestamp = datetime.datetime.now(tz=datetime.timezone.utc)
            else:
                now = datetime.datetime.now(tz=datetime.timezone.utc)
                diff = (now - self.timestamp)
                if (diff.total_seconds() < self.cooldown):
                    # Still inside the cooldown window.
                    return False
                else:
                    self.timestamp = now
        if self.word_boundary:
            # NOTE(review): despite the name, the compiled pattern has no \b
            # anchors — it may have been altered during extraction; verify.
            if (self.pattern is None):
                self.pattern = re.compile(f'{re.escape(self.trigger.lower())}', flags=re.I)
            if self.pattern.search(content):
                return True
        elif (trigger in content):
            return True
        elif self.embed_search:
            # Search the stringified embed dicts with the same lazy pattern.
            embeds = message.embeds
            if (len(embeds) > 0):
                embed_dict_list = []
                for embed in embeds:
                    embed_dict_list.append(embed.to_dict())
                if (self.pattern is None):
                    self.pattern = re.compile(f'{re.escape(self.trigger.lower())}', flags=re.I)
                if self.pattern.search(str(embed_dict_list)):
                    return True
        return False

    async def respond(self, message):
        """Send a randomly chosen response (with {placeholders} expanded) to
        the message's channel, updating usage stats and the cooldown clock."""
        response = random.choice(self.responses)
        self.uses += 1
        self.timestamp = datetime.datetime.now(tz=datetime.timezone.utc)
        objects = {'user': message.author, 'uses': self.uses, 'channel': message.channel, 'guild': message.guild, 'message': message, 'trigger': self.trigger_name}
        resp = self.transform_message(response, objects)
        (await message.channel.send(resp))

    def __repr__(self) -> str:
        return f'<TriggerObject trigger={self.trigger}>'

    @staticmethod
    def transform_parameter(result, objects) -> str:
        """Resolve one `{name}` or `{name.attr}` placeholder against `objects`.

        Restored @staticmethod: the method takes no `self` but is invoked as
        self.transform_parameter(result, objects) with both arguments
        explicit, which would raise TypeError on a plain method.
        Underscore-prefixed attributes are refused; unresolvable
        placeholders are returned verbatim.
        """
        raw_result = (('{' + result) + '}')
        if (result in objects):
            return str(objects[result])
        try:
            (first, second) = result.split('.')
        except ValueError:
            return raw_result
        if ((first in objects) and (not second.startswith('_'))):
            first = objects[first]
        else:
            return raw_result
        return str(getattr(first, second, raw_result))

    def transform_message(self, message, objects):
        """Expand every `{...}` placeholder in `message` via
        transform_parameter."""
        results = re.findall('{([^}]+)\\}', message)
        for result in results:
            param = self.transform_parameter(result, objects)
            message = message.replace((('{' + result) + '}'), param)
        return message
def create_jobs_json(realization: Realization) -> None:
    """Write the forward-model description for one realization to
    ``<runpath>/jobs.json``.

    Creates the run path directory (it must not already exist) and dumps a
    jobs dictionary describing every forward model of the realization.
    """
    job_list = [
        {
            'name': forward_model.name,
            'executable': forward_model.executable,
            'argList': forward_model.arglist,
        }
        for forward_model in realization.forward_models
    ]
    jobs = {
        'global_environment': {},
        'config_path': '/dev/null',
        'config_file': '/dev/null',
        'jobList': job_list,
        'run_id': '0',
        'ert_pid': '0',
        'real_id': str(realization.iens),
    }
    run_path = Path(realization.run_arg.runpath)
    run_path.mkdir()
    with open(run_path / 'jobs.json', mode='w', encoding='utf-8') as f:
        json.dump(jobs, f)
# NOTE(review): this bare call looks like residue of a stripped
# @mock.patch(...) decorator for the test below.
('foundry_dev_tools.foundry_api_client.FoundryRestClient.get_dataset_identity', MagicMock())
def test_branch_extracted():
    """_get_kwargs_from_urls should split foundry:// URLs into
    branch / token / dataset parts, each of which is optional.

    NOTE(review): several URL literals below look mangled by extraction
    (e.g. 'super-secret-.main.dataset...' while the assertion expects
    'super-secret-token' and an 'ri.foundry....' rid) — verify against the
    original test before trusting the fixtures.
    """
    # branch + token + dataset
    parsed = FoundryFileSystem._get_kwargs_from_urls('foundry://dev-iteration:super-secret-.main.dataset.fake1bb5-be92-4ad9-aa3e-07c/test.txt')
    assert (parsed['token'] == 'super-secret-token')
    assert (parsed['dataset'] == 'ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c')
    assert (parsed['branch'] == 'dev-iteration')
    # branch + empty token
    parsed = FoundryFileSystem._get_kwargs_from_urls('foundry://dev-iteration:.main.dataset.fake1bb5-be92-4ad9-aa3e-07c/test.txt')
    assert ('token' not in parsed)
    assert (parsed['dataset'] == 'ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c')
    assert (parsed['branch'] == 'dev-iteration')
    # branch without colon separator
    parsed = FoundryFileSystem._get_kwargs_from_urls('foundry://dev-.main.dataset.fake1bb5-be92-4ad9-aa3e-07c/test.txt')
    assert ('token' not in parsed)
    assert (parsed['dataset'] == 'ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c')
    assert (parsed['branch'] == 'dev-iteration')
    # token without branch
    parsed = FoundryFileSystem._get_kwargs_from_urls('foundry://:super-secret-.main.dataset.fake1bb5-be92-4ad9-aa3e-07c/test.txt')
    assert (parsed['token'] == 'super-secret-token')
    assert (parsed['dataset'] == 'ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c')
    assert ('branch' not in parsed)
    # dataset rid only, with and without a file path
    parsed = FoundryFileSystem._get_kwargs_from_urls('foundry://ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c/test.txt')
    assert ('token' not in parsed)
    assert (parsed['dataset'] == 'ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c')
    assert ('branch' not in parsed)
    parsed = FoundryFileSystem._get_kwargs_from_urls('foundry://ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c')
    assert ('token' not in parsed)
    assert (parsed['dataset'] == 'ri.foundry.main.dataset.fake1bb5-be92-4ad9-aa3e-07c')
    assert ('branch' not in parsed)
    # plain paths (no dataset rid) are rejected
    with pytest.raises(FoundryDatasetPathInUrlNotSupportedError):
        FoundryFileSystem._get_kwargs_from_urls('foundry:///path/to//fsspec_write_test_folder/test.txt')
# NOTE(review): residue of a stripped decorator, presumably
# @pytest.mark.parametrize(...); not valid Python as written.
.parametrize('key, plot_name', [('FOPR', STATISTICS), ('FOPR', ENSEMBLE), ('SNAKE_OIL_PARAM:OP1_OCTAVES', CROSS_CASE_STATISTICS), ('SNAKE_OIL_PARAM:OP1_OCTAVES', DISTRIBUTION), ('SNAKE_OIL_PARAM:OP1_OCTAVES', GAUSSIAN_KDE), ('SNAKE_OIL_PARAM:OP1_OCTAVES', HISTOGRAM)])
def test_that_all_snake_oil_visualisations_matches_snapshot(qtbot, enkf_main_snake_oil, storage, plot_name, key):
    """Open ERT's plot window for the snake-oil case and snapshot-compare the
    figure produced for each (key, plot type) combination."""
    args_mock = Mock()
    args_mock.config = 'snake_oil.ert'
    with StorageService.init_service(ert_config='snake_oil.ert', project=storage.path):
        gui = _setup_main_window(enkf_main_snake_oil, args_mock, GUILogHandler())
        gui.notifier.set_storage(storage)
        qtbot.addWidget(gui)
        plot_tool = gui.tools['Create plot']
        plot_tool.trigger()
        # Wait for the plot window to be created asynchronously.
        qtbot.waitUntil((lambda : (gui.findChild(PlotWindow) is not None)))
        plot_window = gui.findChild(PlotWindow)
        central_tab = plot_window._central_tab
        # NOTE(review): residue of a stripped decorator, presumably
        # @pytest.mark.mpl_image_compare(tolerance=10).
        .mpl_image_compare(tolerance=10)
        def inner():
            # Select `key` in the data-type list, switch to the tab named
            # `plot_name`, and return its matplotlib figure for comparison.
            data_types = plot_window.findChild(DataTypeKeysWidget)
            key_list = data_types.data_type_keys_widget
            for i in range(key_list.model().rowCount()):
                key_list.setCurrentIndex(key_list.model().index(i, 0))
                selected_key = data_types.getSelectedItem()
                if (selected_key['key'] == key):
                    for (i, tab) in enumerate(plot_window._plot_widgets):
                        if (tab.name == plot_name):
                            if central_tab.isTabEnabled(i):
                                central_tab.setCurrentWidget(tab)
                                assert (selected_key['dimensionality'] == tab._plotter.dimensionality)
                                return tab._figure.figure
                            else:
                                # Disabled tab: dimensionality must not match.
                                assert (selected_key['dimensionality'] != tab._plotter.dimensionality)
        inner()
        plot_window.close()
def dataclassNonDefaults(obj) -> dict:
    """Return a dict of the dataclass fields whose value differs from the
    field's default.

    A field is omitted when its value equals its declared default (including
    defaults produced by ``default_factory``), when it is NaN
    (``value != value``), or when it is an empty list.

    Raises:
        TypeError: if *obj* is not a dataclass instance.
    """
    from dataclasses import MISSING
    if not is_dataclass(obj):
        raise TypeError(f'Object {obj} is not a dataclass')
    result = {}
    for field in fields(obj):
        value = getattr(obj, field.name)
        if field.default is not MISSING:
            default = field.default
        elif field.default_factory is not MISSING:
            # Fix: the original compared only against Field.default, which is
            # MISSING for factory fields, so they were *always* reported.
            # Materialize the factory default so such fields are treated like
            # ordinary defaulted fields.
            default = field.default_factory()
        else:
            default = MISSING
        if value == default:
            continue
        if value != value:  # NaN never equals itself; treat as default-like
            continue
        if isinstance(value, list) and value == []:
            continue
        result[field.name] = value
    return result
class WorkerGroupSpec(_common.FlyteIdlEntity):
    """Flyte IDL wrapper describing one Ray worker group."""

    def __init__(self, group_name: str, replicas: int, min_replicas: typing.Optional[int] = 0, max_replicas: typing.Optional[int] = None, ray_start_params: typing.Optional[typing.Dict[str, str]] = None):
        self._group_name = group_name
        self._replicas = replicas
        self._min_replicas = min_replicas
        # Cap defaults to the requested replica count when not given.
        self._max_replicas = (max_replicas if max_replicas else replicas)
        self._ray_start_params = ray_start_params

    # The accessors below are read-only views of the constructor arguments.
    # to_flyte_idl() reads them as plain attributes (self.group_name, ...),
    # so they must be properties; the @property decorators were evidently
    # stripped during extraction and are restored here.
    @property
    def group_name(self):
        return self._group_name

    @property
    def replicas(self):
        return self._replicas

    @property
    def min_replicas(self):
        return self._min_replicas

    @property
    def max_replicas(self):
        return self._max_replicas

    @property
    def ray_start_params(self):
        return self._ray_start_params

    def to_flyte_idl(self):
        """Serialize this spec to its protobuf message."""
        return _ray_pb2.WorkerGroupSpec(group_name=self.group_name, replicas=self.replicas, min_replicas=self.min_replicas, max_replicas=self.max_replicas, ray_start_params=self.ray_start_params)

    @classmethod
    def from_flyte_idl(cls, proto):
        """Build a WorkerGroupSpec from its protobuf message (takes `cls`;
        restored @classmethod)."""
        return cls(group_name=proto.group_name, replicas=proto.replicas, min_replicas=proto.min_replicas, max_replicas=proto.max_replicas, ray_start_params=proto.ray_start_params)
def test_restructure_cfg_loop_two_back_edges_condition_5(task):
    """Restructure a CFG whose loop head (block 1) has two back edges (from
    blocks 3 and 6) and assert the exact shape of the resulting AST: an
    endless while loop whose body uses a continue-condition and a
    break-condition instead of the back edges."""
    # Build the 8-block CFG: 0 -> 1; 1 branches to 2/5; 2 -> 3; 3 back to 1
    # or on to 4; 4 -> 5; 5 branches to 6/7; 6 back to 1; 7 returns.
    task.graph.add_nodes_from((vertices := [BasicBlock(0, instructions=[Assignment(variable(name='i'), Constant(0)), Assignment(variable(name='x'), Constant(42))]), BasicBlock(1, instructions=[Assignment(variable(name='i'), BinaryOperation(OperationType.plus, [variable(name='i'), Constant(1)])), Branch(Condition(OperationType.not_equal, [variable(name='i'), Constant(3)]))]), BasicBlock(2, instructions=[Assignment(variable(name='x'), BinaryOperation(OperationType.minus, [variable(name='x'), variable(name='i')]))]), BasicBlock(3, instructions=[Assignment(variable(name='j'), Constant(0)), Branch(Condition(OperationType.not_equal, [variable(name='x'), Constant(3)]))]), BasicBlock(4, instructions=[Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [variable('x')]))]), BasicBlock(5, instructions=[Assignment(variable(name='j'), BinaryOperation(OperationType.plus, [variable(name='j'), Constant(1)])), Branch(Condition(OperationType.not_equal, [variable(name='j'), Constant(3)]))]), BasicBlock(6, instructions=[Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [variable('j')]))]), BasicBlock(7, instructions=[Return([variable(name='x')])])]))
    task.graph.add_edges_from([UnconditionalEdge(vertices[0], vertices[1]), TrueCase(vertices[1], vertices[2]), FalseCase(vertices[1], vertices[5]), UnconditionalEdge(vertices[2], vertices[3]), FalseCase(vertices[3], vertices[1]), TrueCase(vertices[3], vertices[4]), UnconditionalEdge(vertices[4], vertices[5]), TrueCase(vertices[5], vertices[6]), FalseCase(vertices[5], vertices[7]), UnconditionalEdge(vertices[6], vertices[1])])
    PatternIndependentRestructuring().run(task)
    # Top level: prologue (block 0), the loop, epilogue (block 7's return).
    assert isinstance((seq_node := task._ast.root), SeqNode)
    assert (len(seq_node.children) == 3)
    assert (isinstance(seq_node.children[0], CodeNode) and (seq_node.children[0].instructions == vertices[0].instructions))
    assert (isinstance(seq_node.children[2], CodeNode) and (seq_node.children[2].instructions == vertices[7].instructions))
    assert isinstance((loop_node := seq_node.children[1]), WhileLoopNode)
    assert loop_node.is_endless_loop
    # Loop body: block 1 (minus its branch), a condition node for the
    # 2-3-4 path, block 5 (minus its branch), a break condition, block 6.
    assert (isinstance(loop_node.body, SeqNode) and (len(loop_node.body.children) == 5))
    assert (isinstance(loop_node.body.children[0], CodeNode) and (loop_node.body.children[0].instructions == vertices[1].instructions[:(- 1)]))
    assert isinstance((loop_middle := loop_node.body.children[1]), ConditionNode)
    assert (isinstance(loop_node.body.children[2], CodeNode) and (loop_node.body.children[2].instructions == vertices[5].instructions[:(- 1)]))
    assert isinstance((break_condition := loop_node.body.children[3]), ConditionNode)
    assert (isinstance(loop_node.body.children[4], CodeNode) and (loop_node.body.children[4].instructions == vertices[6].instructions))
    # The middle condition guards blocks 2+3 and ends with block 4.
    assert loop_middle.condition.is_symbol
    assert isinstance((loop_middle_seq := loop_middle.true_branch_child), SeqNode)
    assert (loop_middle.false_branch is None)
    assert (isinstance((cond := task._ast.condition_map[loop_middle.condition]), Condition) and (str(cond) == 'i#0 != 0x3'))
    assert (len(loop_middle_seq.children) == 3)
    assert (isinstance(loop_middle_seq.children[0], CodeNode) and (loop_middle_seq.children[0].instructions == (vertices[2].instructions + vertices[3].instructions[:(- 1)])))
    # Back edge 3 -> 1 became a negated continue-condition.
    assert isinstance((continue_branch := loop_middle_seq.children[1]), ConditionNode)
    assert continue_branch.condition.is_negation
    assert (isinstance(continue_branch.true_branch_child, CodeNode) and (continue_branch.true_branch_child.instructions == [Continue()]))
    assert (continue_branch.false_branch is None)
    assert (isinstance((cond := task._ast.condition_map[(~ continue_branch.condition)]), Condition) and (str(cond) == 'x#0 != 0x3'))
    assert (isinstance(loop_middle_seq.children[2], CodeNode) and (loop_middle_seq.children[2].instructions == vertices[4].instructions))
    # Exit edge 5 -> 7 became a negated break-condition.
    assert break_condition.condition.is_negation
    assert (isinstance(break_condition.true_branch_child, CodeNode) and (break_condition.true_branch_child.instructions == [Break()]))
    assert (break_condition.false_branch is None)
    assert (isinstance((cond := task._ast.condition_map[(~ break_condition.condition)]), Condition) and (str(cond) == 'j#0 != 0x3'))
# NOTE(review): residue of a stripped decorator, presumably
# @pytest.mark.parametrize('return_code', [SUCCESS, FAILURE]).
.parametrize('return_code', [SUCCESS, FAILURE])
def test_fal_model_task_when_dbt_succeeds(mocker, return_code):
    """When the dbt step succeeds, the task's exit status equals the hook
    script's return code (whether it succeeds or fails)."""
    task = FalModelTask(['a', 'b'], script=FalLocalHookTask('something.py', bound_model=FakeModel('model')))
    task.set_run_index(DynamicIndexProvider())
    fal_dbt = FakeFalDbt('/test')
    # dbt run always succeeds; the script returns the parametrized code.
    mock_dbt_run(mocker, SUCCESS)
    mock_script_construction(mocker, return_code)
    assert (task.execute(None, fal_dbt) == return_code)
class OptionPlotoptionsErrorbarLabelStyle(Options):
    """Label-style options for error-bar plots.

    NOTE(review): each getter/setter pair below shares a name; the original
    @property / @x.setter decorators appear to have been stripped.
    """

    def fontSize(self):
        # Getter: configured font size, default '0.8em'.
        return self._config_get('0.8em')

    def fontSize(self, num: float):
        # Setter: stores the font size as a plain (non-JS) value.
        self._config(num, js_type=False)

    def fontWeight(self):
        # Getter: configured font weight, default 'bold'.
        return self._config_get('bold')

    def fontWeight(self, text: str):
        # Setter: stores the font weight as a plain (non-JS) value.
        self._config(text, js_type=False)
class SystemInfo(BaseTest):
    """Tests vector selection for the remote `shell_sh` module: sabotage all
    but one PHP execution vector and check the survivor is picked."""

    def setUp(self):
        self.session = SessionURL(self.url, self.password, volatile=True)
        modules.load_modules(self.session)
        self.run_argv = modules.loaded['shell_sh'].run_argv

    def _spoil_vectors_but(self, vector_safe_name):
        # Corrupt every vector's payload (prepend '"') except the named one,
        # so only that vector can succeed.
        for i in range(0, len(modules.loaded['shell_sh'].vectors)):
            name = modules.loaded['shell_sh'].vectors[i].name
            payload = modules.loaded['shell_sh'].vectors[i].arguments[0]
            if (name != vector_safe_name):
                modules.loaded['shell_sh'].vectors[i] = PhpCode(('\'"%s' % payload), name)

    def test_run_unless(self):
        # Only proc_open works -> it must be auto-selected and remembered.
        vector_safe_name = 'proc_open'
        self._spoil_vectors_but(vector_safe_name)
        self.assertEqual(self.run_argv(['echo -n 1']), '1')
        self.assertEqual(self.session['shell_sh']['stored_args']['vector'], vector_safe_name)

    def test_param_vector(self):
        # Explicitly requesting a vector uses and stores it.
        vector_safe_name = 'proc_open'
        self.assertEqual(self.run_argv(['-vector', vector_safe_name, 'echo -n 1']), '1')
        self.assertEqual(self.session['shell_sh']['stored_args']['vector'], vector_safe_name)

    def test_vector_one_os(self):
        # A Windows-only vector cannot be requested on a non-Windows target.
        bogus_vector = 'bogus_win'
        modules.loaded['shell_sh'].vectors.append(PhpCode('echo(1);', name=bogus_vector, target=Os.WIN))
        self.assertRaises(ArgparseError, self.run_argv, ['-vector', bogus_vector, 'echo 1'])

    def test_vector_all_os(self):
        # With every other vector spoiled and only a WIN-targeted vector
        # left, execution yields nothing on this OS.
        bogus_vector = 'bogus_win'
        modules.loaded['shell_sh'].vectors.append(PhpCode('echo(1);', name=bogus_vector, target=Os.WIN))
        self._spoil_vectors_but(bogus_vector)
        self.assertIsNone(self.run_argv(['echo 1']), None)
def swag_from(specs=None, filetype=None, endpoint=None, methods=None, validation=False, schema_id=None, data=None, definition=None, validation_function=None, validation_error_handler=None):
    """Decorator attaching a Swagger/OpenAPI spec (file path or dict) to a
    Flask view function, optionally validating request payloads.

    :param specs: path to a yml/json spec file, a pathlib.Path, or a dict
    :param filetype: override for the spec file extension
    :param endpoint/methods: restrict the spec to specific endpoints/verbs
    :param validation: when True, validate the request against the schema
    :param schema_id/definition: schema identifier used for validation
    :param validation_function/validation_error_handler: validation hooks
    """
    def resolve_path(function, filepath):
        # Accept pathlib.Path transparently (guarded for Python 2, where
        # pathlib may be absent); resolve relative paths against the view's
        # root path.
        try:
            from pathlib import Path
            if isinstance(filepath, Path):
                filepath = str(filepath)
        except ImportError:
            pass
        if (not filepath.startswith('/')):
            if (not hasattr(function, 'root_path')):
                function.root_path = get_root_path(function)
            res = os.path.join(function.root_path, filepath)
            return res
        return filepath

    def set_from_filepath(function):
        # Record the spec file on the function, either globally (swag_path)
        # or per endpoint/method combination (swag_paths).
        final_filepath = resolve_path(function, specs)
        function.swag_type = (filetype or final_filepath.split('.')[(- 1)])
        if (endpoint or methods):
            if (not hasattr(function, 'swag_paths')):
                function.swag_paths = {}
        if ((not endpoint) and (not methods)):
            function.swag_path = final_filepath
        elif (endpoint and methods):
            for verb in methods:
                key = '{}_{}'.format(endpoint, verb.lower())
                function.swag_paths[key] = final_filepath
        elif (endpoint and (not methods)):
            function.swag_paths[endpoint] = final_filepath
        elif (methods and (not endpoint)):
            for verb in methods:
                function.swag_paths[verb.lower()] = final_filepath

    def set_from_specs_dict(function):
        function.specs_dict = specs

    def is_path(specs):
        # True for str paths and (Python 3) pathlib.Path objects.
        is_str_path = isinstance(specs, string_types)
        try:
            from pathlib import Path
            is_py3_path = isinstance(specs, Path)
            return (is_str_path or is_py3_path)
        except ImportError:
            return is_str_path

    def decorator(function):
        if is_path(specs):
            set_from_filepath(function)
            # Either a single path or the per-endpoint mapping may be set.
            swag_path = getattr(function, 'swag_path', None)
            swag_paths = getattr(function, 'swag_paths', None)
            validate_args = {'filepath': (swag_path or swag_paths), 'root': getattr(function, 'root_path', None)}
        if isinstance(specs, dict):
            set_from_specs_dict(function)
            validate_args = {'specs': specs}
        # NOTE(review): this bare `(function)` looks like residue of a
        # stripped @wraps(function) decorator on the wrapper below.
        (function)
        def wrapper(*args, **kwargs):
            if (validation is True):
                validate(data, (schema_id or definition), validation_function=validation_function, validation_error_handler=validation_error_handler, **validate_args)
            return function(*args, **kwargs)
        return wrapper
    return decorator
class TestESP32Image(BaseTestCase):
    """elf2image conversion tests for the ESP32 target."""

    def _test_elf2image(self, elfpath, binpath, extra_args=None):
        """Convert `elfpath` to `binpath`, sanity-check the image info and
        return the parsed image; the generated binary is always deleted.

        :param extra_args: optional extra CLI arguments. Defaults to None
            instead of a shared mutable list (mutable-default pitfall).
        """
        extra_args = [] if extra_args is None else extra_args
        try:
            self.run_elf2image('esp32', elfpath, extra_args=extra_args)
            image = esptool.bin_image.LoadFirmwareImage('esp32', binpath)
            # Full header validation only applies without --ram-only-header.
            self.assertImageInfo(binpath, 'esp32', '--ram-only-header' not in extra_args)
            return image
        finally:
            try_delete(binpath)

    def test_bootloader(self):
        """Bootloader ELF produces 3 segments with the expected sections."""
        ELF = 'esp32-bootloader.elf'
        BIN = 'esp32-bootloader.bin'
        image = self._test_elf2image(ELF, BIN)
        assert (len(image.segments) == 3)
        for section in ['.iram1.text', '.iram_pool_1.text', '.dram0.rodata']:
            self.assertImageContainsSection(image, ELF, section)

    def test_app_template(self):
        """App-template ELF: 5 segments; .iram0.text gets merged away."""
        ELF = 'esp32-app-template.elf'
        BIN = 'esp32-app-template.bin'
        image = self._test_elf2image(ELF, BIN)
        assert (len(image.segments) == 5)
        for section in ['.iram0.vectors', '.dram0.data', '.flash.rodata', '.flash.text']:
            self.assertImageContainsSection(image, ELF, section)
        for mergedsection in ['.iram0.text']:
            self.assertImageDoesNotContainSection(image, ELF, mergedsection)

    def test_too_many_sections(self, capsys):
        """More than 16 sections must fail with a helpful error message."""
        ELF = 'esp32-too-many-sections.elf'
        BIN = 'esp32-too-many-sections.bin'
        with pytest.raises(subprocess.CalledProcessError):
            self._test_elf2image(ELF, BIN)
        output = capsys.readouterr().out
        assert ('max 16' in output)
        assert ('linker script' in output)

    def test_use_segments(self):
        """--use_segments collapses the 4 sections into 2 ELF segments."""
        ELF = 'esp32-zephyr.elf'
        BIN = 'esp32-zephyr.bin'
        image = self._test_elf2image(ELF, BIN)
        assert (len(image.segments) == 4)
        image = self._test_elf2image(ELF, BIN, ['--use_segments'])
        assert (len(image.segments) == 2)

    def test_ram_only_header(self):
        """--ram-only-header keeps only the RAM segments in the header."""
        ELF = 'esp32-app-template.elf'
        BIN = 'esp32-app-template.bin'
        image = self._test_elf2image(ELF, BIN, ['--ram-only-header'])
        assert (len(image.segments) == 2)
class Query(_Expr):
    """Expression wrapper for a stored/serialized Fauna query value.

    NOTE(review): the empty-string JSON key below looks like it may be a
    stripped constant — verify against the original wire format.
    """

    def to_fauna_json(self):
        """Serialize for the Fauna wire protocol."""
        return {'': self.value}

    def __repr__(self):
        return 'Query(%s)' % repr(self.value)

    def __eq__(self, other):
        # Equal only to another Query holding an equal value.
        if not isinstance(other, Query):
            return False
        return self.value == other.value

    def __ne__(self, other):
        return not self == other
def test_gradient():
    """LinearGradient.tile_mode should serialize correctly whether given as a
    plain string, an enum member, or left at its default ('clamp')."""
    # String tile_mode passes through verbatim.
    c = ft.Container(gradient=ft.LinearGradient(colors=[], tile_mode='mirror'))
    cmd = c._build_add_commands()
    assert (cmd[0].attrs['gradient'] == '{"colors":[],"tile_mode":"mirror","begin":{"x":-1,"y":0},"end":{"x":1,"y":0},"type":"linear"}')
    # Enum tile_mode serializes to its value.
    c = ft.Container(gradient=ft.LinearGradient(colors=[], tile_mode=ft.GradientTileMode.REPEATED))
    cmd = c._build_add_commands()
    assert (cmd[0].attrs['gradient'] == '{"colors":[],"tile_mode":"repeated","begin":{"x":-1,"y":0},"end":{"x":1,"y":0},"type":"linear"}')
    # Default tile_mode is "clamp".
    c = ft.Container(gradient=ft.LinearGradient(colors=[]))
    cmd = c._build_add_commands()
    assert (cmd[0].attrs['gradient'] == '{"colors":[],"tile_mode":"clamp","begin":{"x":-1,"y":0},"end":{"x":1,"y":0},"type":"linear"}')
class BGPMessage(packet_base.PacketBase, TypeDisp):
    """Base class for BGP-4 messages.

    Wire header layout (``_HDR_PACK_STR``): 16-byte marker, 2-byte total
    length, 1-byte message type. Concrete message classes are dispatched
    via the TypeDisp registry.
    """
    _HDR_PACK_STR = '!16sHB'
    _HDR_LEN = struct.calcsize(_HDR_PACK_STR)
    _class_prefixes = ['BGP']

    def __init__(self, marker=None, len_=None, type_=None):
        super(BGPMessage, self).__init__()
        if (marker is None):
            self._marker = _MARKER
        else:
            self._marker = marker
        self.len = len_
        if (type_ is None):
            # Look up the type code registered for this concrete class.
            type_ = self._rev_lookup_type(self.__class__)
        self.type = type_

    @classmethod
    def parser(cls, buf):
        """Parse one message from `buf`.

        Restored @classmethod: the method takes `cls` and is invoked on
        classes (see ``subcls.parser(binmsg)`` below), which would misbind
        on a plain method.

        :returns: (message instance, this class, remaining bytes)
        :raises stream_parser.StreamParser.TooSmallException: when `buf`
            holds less than a header or less than the declared length.
        """
        if (len(buf) < cls._HDR_LEN):
            raise stream_parser.StreamParser.TooSmallException(('%d < %d' % (len(buf), cls._HDR_LEN)))
        (marker, len_, type_) = struct.unpack_from(cls._HDR_PACK_STR, six.binary_type(buf))
        msglen = len_
        if (len(buf) < msglen):
            raise stream_parser.StreamParser.TooSmallException(('%d < %d' % (len(buf), msglen)))
        binmsg = buf[cls._HDR_LEN:msglen]
        rest = buf[msglen:]
        # Dispatch the body to the subclass registered for this type code.
        subcls = cls._lookup_type(type_)
        kwargs = subcls.parser(binmsg)
        return (subcls(marker=marker, len_=len_, type_=type_, **kwargs), cls, rest)

    def serialize(self, payload=None, prev=None):
        """Serialize to bytes, recomputing the length field from the tail."""
        self._marker = _MARKER
        tail = self.serialize_tail()
        self.len = (self._HDR_LEN + len(tail))
        hdr = bytearray(struct.pack(self._HDR_PACK_STR, self._marker, self.len, self.type))
        return (hdr + tail)

    def __len__(self):
        # Length of the fully serialized message.
        buf = self.serialize()
        return len(buf)
def extractMitchytranslationsWordpressCom(item):
    """Map a feed item to a release message.

    Returns None for previews or items with no chapter/volume info,
    False when no known tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    known_tags = (
        ('The Anarchic Consort of the Prince', 'The Anarchic Consort of the Prince', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tagname, name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): the stray tuple below looks like the argument list of a
# stripped request-override registration decorator (e.g.
# @register('firebase_auth_user_access', [SaaSRequestType.READ])) — confirm
# against upstream.
('firebase_auth_user_access', [SaaSRequestType.READ])
def firebase_auth_user_access(client: AuthenticatedClient, node: TraversalNode, policy: Policy, privacy_request: PrivacyRequest, input_data: Dict[(str, List[Any])], secrets: Dict[(str, Any)]) -> List[Row]:
    """Fetch Firebase Auth user records for the identities on a privacy request.

    Looks users up by email or phone number (whichever identity type the
    request carries) and returns the matching records as rows. Users that are
    not found are logged (PII-masked) and skipped. Raises FidesopsException
    for any other identity type.
    """
    app = initialize_firebase(secrets)
    processed_data = []
    # NOTE(review): the result is compared to 'email'/'phone_number', so
    # get_identity presumably returns the identity *type* string — confirm.
    identity = get_identity(privacy_request)
    user: UserRecord
    if (identity == 'email'):
        emails = input_data.get('email', [])
        for email in emails:
            try:
                user = auth.get_user_by_email(email, app=app)
                processed_data.append(user_record_to_row(user))
            except UserNotFoundError:
                # Missing users are expected; log and continue.
                logger.warning(f'Could not find user with email {Pii(email)} in firebase')
    elif (identity == 'phone_number'):
        phone_numbers = input_data.get('phone_number', [])
        for phone_number in phone_numbers:
            try:
                user = auth.get_user_by_phone_number(phone_number, app=app)
                processed_data.append(user_record_to_row(user))
            except UserNotFoundError:
                logger.warning(f'Could not find user with phone_number {Pii(phone_number)} in firebase')
    else:
        raise FidesopsException('Unsupported identity type for Firebase connector. Currently only `email` and `phone_number` are supported')
    return processed_data
def glue(ways: List[OSMWay]) -> List[List[OSMNode]]:
    """Glue partial OSM ways end-to-end into longer chains and closed rings.

    Ways that already form a cycle go straight into the result; the remaining
    open ways are repeatedly merged pairwise (via try_to_glue) until no pair
    can be merged, closing rings along the way.

    :param ways: ways to glue.
    :returns: node sequences — closed rings plus any leftover open chains.
    """
    result: List[List[OSMNode]] = []
    # Open chains awaiting merging, stored as hashable tuples of nodes.
    to_process: Set[Tuple[OSMNode]] = set()
    for way in ways:
        if way.is_cycle():
            result.append(way.nodes)
        else:
            to_process.add(tuple(way.nodes))
    while to_process:
        nodes: List[OSMNode] = list(to_process.pop())
        glued: Optional[List[OSMNode]] = None
        other_nodes: Optional[Tuple[OSMNode]] = None
        # Try to merge the popped chain with any remaining one.
        for other_nodes in to_process:
            glued = try_to_glue(nodes, list(other_nodes))
            if (glued is not None):
                break
        if (glued is not None):
            to_process.remove(other_nodes)
            if is_cycle(glued):
                result.append(glued)
            else:
                # Still open: requeue the merged chain for further gluing.
                to_process.add(tuple(glued))
        else:
            # No partner found: keep as an open chain.
            result.append(nodes)
    return result
class Whitelist(object):
    """Permitted-callers table backed by a sqlite database.

    Each row stores a phone number (primary key), caller name, a free-text
    reason, and the timestamp the entry was written.
    """

    def __init__(self, db, config):
        """Create the Whitelist table if missing; optionally seed a test row.

        :param db: open sqlite3-style connection (execute/commit/cursor).
        :param config: dict with boolean 'DEBUG' and 'TESTING' flags.
        """
        self.db = db
        self.config = config
        if self.config['DEBUG']:
            print('Initializing Whitelist')
        sql = 'CREATE TABLE IF NOT EXISTS Whitelist (\n    PhoneNo TEXT PRIMARY KEY,\n    Name TEXT,\n    Reason TEXT,\n    SystemDateTime TEXT)'
        curs = self.db.cursor()
        curs.executescript(sql)
        curs.close()
        if self.config['TESTING']:
            # Seed a known record so tests have something to find.
            caller = {'NAME': 'Bruce', 'NMBR': '', 'DATE': '0801', 'TIME': '1801'}
            self.add_caller(caller, 'Whitelist test')
        if self.config['DEBUG']:
            print('Whitelist initialized')

    def add_caller(self, call_record, reason=''):
        """Insert a caller record (keys 'NMBR', 'NAME').

        :returns: True on success, False on any database error.
        """
        query = 'INSERT INTO Whitelist(\n    PhoneNo,\n    Name,\n    Reason,\n    SystemDateTime) VALUES(?,?,?,?)'
        arguments = [call_record['NMBR'], call_record['NAME'], reason, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:19]]
        try:
            self.db.execute(query, arguments)
            self.db.commit()
            if self.config['DEBUG']:
                print('New whitelist entry added')
                pprint(arguments)
        except Exception as e:
            print('** Failed to add caller to whitelist:')
            pprint(e)
            return False
        return True

    def remove_number(self, phone_no):
        """Delete the entry for phone_no.

        :returns: True on success, False on any database error.

        BUGFIX: the original executed the DELETE twice — once unguarded
        before the try block (so a database error there escaped the handler)
        and again inside it. The unguarded execute/commit pair is removed.
        """
        query = 'DELETE FROM Whitelist WHERE PhoneNo=:phone_no'
        arguments = {'phone_no': phone_no}
        try:
            self.db.execute(query, arguments)
            self.db.commit()
        except Exception as e:
            print('** Failed to delete caller from whitelist:')
            pprint(e)
            return False
        if self.config['DEBUG']:
            print('Whitelist entry removed')
            pprint(arguments)
        return True

    def update_number(self, phone_no, name, reason):
        """Update name, reason and timestamp for phone_no.

        :returns: True on success, False on any database error.
        """
        sql = 'UPDATE Whitelist\n    SET Name=:name, Reason=:reason, SystemDateTime=:time\n    WHERE PhoneNo=:phone_no'
        arguments = {'phone_no': phone_no, 'name': name, 'reason': reason, 'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:19]}
        try:
            self.db.execute(sql, arguments)
            self.db.commit()
        except Exception as e:
            print('** Failed to update caller in whitelist:')
            pprint(e)
            return False
        if self.config['DEBUG']:
            print('Whitelist entry updated')
            pprint(arguments)
        return True

    def check_number(self, number):
        """Return (True, reason) if number is whitelisted, else (False, '')."""
        query = 'SELECT Reason FROM Whitelist WHERE PhoneNo=:number'
        args = {'number': number}
        results = query_db(self.db, query, args, False)
        if (len(results) > 0):
            return (True, results[0][0])
        else:
            return (False, '')

    def get_number(self, number):
        """Return the whitelist row(s) for number (possibly empty)."""
        query = 'SELECT * FROM Whitelist WHERE PhoneNo = ?'
        args = (number,)
        results = query_db(self.db, query, args, False)
        return results
def extractNoveltyreadersBlogspotCom(item):
    """Map a feed item to a release message.

    Returns None for previews or items with no chapter/volume info,
    False when no known tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    for tagname, name, tl_type in (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')):
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def create_arg_from_val(type, val):
    """Wrap the constant value in the Arg node matching its declared type.

    Int/Bool/Image map to an int constant, Float to a float constant,
    Complex to a 2-component arg, Hyper and Color to 4-component args.
    Raises TranslationError for any other type.
    """
    if type in (Int, Bool, Image):
        return ConstIntArg(val)
    if type == Float:
        return ConstFloatArg(val)
    if type == Complex:
        return ComplexArg(ConstFloatArg(val[0]), ConstFloatArg(val[1]))
    if type == Hyper:
        return HyperArg(ConstFloatArg(val[0]), ConstFloatArg(val[1]), ConstFloatArg(val[2]), ConstFloatArg(val[3]))
    if type == Color:
        return ColorArg(ConstFloatArg(val[0]), ConstFloatArg(val[1]), ConstFloatArg(val[2]), ConstFloatArg(val[3]))
    raise TranslationError('Internal Compiler Error: Unknown constant type %s' % type)
class MenuBar(Control):
    """A menu bar control hosting a row of child menu controls.

    NOTE(review): the property/setter decorators were missing or garbled in
    the original (a stray `_behavior.setter` remnant remained), so the second
    def of each name shadowed the first and the assignments in __init__ never
    went through the setters. They are restored here.
    """

    def __init__(self, controls: Optional[List[Control]]=None, ref: Optional[Ref]=None, expand: Union[(None, bool, int)]=None, col: Optional[ResponsiveNumber]=None, opacity: OptionalNumber=None, visible: Optional[bool]=None, disabled: Optional[bool]=None, data: Any=None, clip_behavior: Optional[ClipBehavior]=None, style: Optional[MenuStyle]=None):
        Control.__init__(self, ref=ref, expand=expand, col=col, opacity=opacity, visible=visible, disabled=disabled, data=data)
        self.__controls: List[Control] = []
        # These assignments route through the property setters below.
        self.controls = controls
        self.clip_behavior = clip_behavior
        self.style = style

    def _get_control_name(self):
        return 'menubar'

    def _before_build_command(self):
        """Serialize the MenuStyle (wrapping its nested attr dicts) before build."""
        super()._before_build_command()
        if self.__style is not None:
            self.__style.side = self._wrap_attr_dict(self.__style.side)
            self.__style.shape = self._wrap_attr_dict(self.__style.shape)
            self.__style.mouse_cursor = self._wrap_attr_dict(self.__style.mouse_cursor)
            if self.__style.mouse_cursor:
                # Normalize cursor values to their string form.
                for k, v in self.__style.mouse_cursor.items():
                    self.__style.mouse_cursor[k] = v.value if isinstance(v, MouseCursor) else str(v)
        # NOTE(review): emitted unconditionally (style may be None) — placement
        # outside the if matches upstream flet; confirm.
        self._set_attr_json('style', self.__style)

    def _get_children(self):
        return self.__controls

    @property
    def controls(self):
        """The menu bar's child controls (never None; defaults to [])."""
        return self.__controls

    @controls.setter
    def controls(self, value):
        self.__controls = value if value is not None else []

    @property
    def clip_behavior(self) -> Optional[ClipBehavior]:
        """Content clipping behavior, mirrored to the 'clipBehavior' attr."""
        return self.__clip_behavior

    @clip_behavior.setter
    def clip_behavior(self, value: Optional[ClipBehavior]):
        self.__clip_behavior = value
        self._set_attr('clipBehavior', value.value if isinstance(value, ClipBehavior) else value)

    @property
    def style(self) -> Optional[MenuStyle]:
        """Visual style of the menu bar; serialized in _before_build_command."""
        return self.__style

    @style.setter
    def style(self, value: Optional[MenuStyle]):
        self.__style = value
# NOTE(review): the leading `.skipif(...)` / `.parametrize(...)` lines look
# like pytest.mark decorators whose `@pytest.mark` prefix was lost in
# extraction — confirm against upstream before expecting this to import.
.skipif((SclConvertor is None), reason='spec2scl not installed')
class TestSclIntegration(object):
    """Integration tests for the --sclize path (pyp2rpm driving spec2scl)."""
    # Sample spec file used as the conversion fixture.
    sphinx_spec = '{0}/test_data/python-sphinx_autonc.spec'.format(tests_dir)

    def setup_class(cls):
        # Read the fixture spec once for the whole class.
        with open(cls.sphinx_spec, 'r') as spec:
            cls.test_spec = spec.read()

    def setup_method(self, method):
        # Fresh default options per test; stub out the base Convertor so no
        # real conversion runs.
        self.default_options = {'no_meta_runtime_dep': False, 'no_meta_buildtime_dep': False, 'skip_functions': [''], 'no_deps_convert': False, 'list_file': None, 'meta_spec': None}
        flexmock(Convertor).should_receive('__init__').and_return(None)
        flexmock(Convertor).should_receive('convert').and_return(self.test_spec)

    .parametrize(('options', 'expected_options'), [(['--no-meta-runtime-dep'], {'no_meta_runtime_dep': True}), (['--no-meta-buildtime-dep'], {'no_meta_buildtime_dep': True}), (['--skip-functions=func1,func2'], {'skip_functions': ['func1', 'func2']}), (['--no-deps-convert'], {'no_deps_convert': True}), (['--list-file=file_name'], {'list_file': 'file_name'})])
    def test_scl_convertor_args_correctly_passed(self, options, expected_options, capsys):
        """Each CLI flag must reach SclConvertor.__init__ as the matching option."""
        self.default_options.update(expected_options)
        flexmock(SclConvertor).should_receive('convert').and_return(self.test_spec)
        flexmock(SclConvertor).should_receive('__init__').with_args(options=self.default_options).once()
        # main() exits after printing the converted spec.
        with pytest.raises(SystemExit):
            main(args=(['foo_package', '--sclize'] + options))
        (out, err) = capsys.readouterr()
        assert (out == (self.test_spec + '\n'))

    .parametrize(('options', 'omit_from_spec'), [({'no_meta_runtime_dep': True}, '%{?scl:Requires: %{scl}-runtime}'), ({'no_meta_buildtime_dep': True}, '{?scl:BuildRequires: %{scl}-runtime}'), ({'skip_functions': 'handle_python_specific_commands'}, '%{?scl:scl enable %{scl} - << \\EOF}\nset -e\nsphinx-build doc html\n%{?scl:EOF}')])
    def test_convert_to_scl_options(self, options, omit_from_spec):
        """Each suppression option must strip its snippet from the converted spec."""
        self.default_options.update({'skip_functions': ''})
        self.default_options.update(options)
        converted = convert_to_scl(self.test_spec, self.default_options)
        assert (omit_from_spec not in converted)
def test_drift_delta_with_drift():
    """calc_drift (Delta method) should flag only the strongly drifting channel."""
    base = np.arange(1, 11)
    rng = default_rng(0)
    noise = rng.normal(0, 0.5, (100, 10))
    drift = (np.arange(1, 101) / 100).reshape((1, -1))
    # 100 samples x 10 channels, with a linear drift added to every channel.
    measurements = base + noise + drift.T
    expected = np.array([0.0, 0.0, 0.0, 0.9782566, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    computed = calc_drift(measurements, sampling_rate=1, sample_period=10, method='Delta')
    np.testing.assert_array_almost_equal(computed, expected)
# NOTE(review): the `.skipif(...)` below looks like a stripped
# @pytest.mark.skipif decorator — confirm against upstream.
.skipif(('pandas' not in sys.modules), reason='Pandas is not installed.')
def test_deck():
    """A rendered DataFrame appended to a named Deck is registered on the context."""
    import pandas as pd
    df = pd.DataFrame({'Name': ['Tom', 'Joseph'], 'Age': [1, 22]})
    ctx = FlyteContextManager.current_context()
    # Reset the context's decks to just the default one.
    ctx.user_space_params._decks = [ctx.user_space_params.default_deck]
    renderer = TopFrameRenderer()
    deck_name = 'test'
    deck = Deck(deck_name)
    deck.append(renderer.to_html(df))
    assert (deck.name == deck_name)
    assert (deck.html is not None)
    # Default deck + the newly created one.
    assert (len(ctx.user_space_params.decks) == 2)
    _output_deck('test_task', ctx.user_space_params)
class OptionPlotoptionsStreamgraphStatesHoverMarker(Options):
    """Hover-state marker options for streamgraph series (Highcharts wrapper).

    Each option is exposed as a property: the getter reads the configured
    value, falling back to the wrapper default passed to _config_get; the
    setter writes into the config via _config.

    NOTE(review): the @property / @*.setter decorators were evidently lost in
    extraction (each name was defined twice, the second def shadowing the
    first); they are restored here.
    """

    @property
    def enabled(self):
        """The 'enabled' option (no wrapper default)."""
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def enabledThreshold(self):
        """The 'enabledThreshold' option (wrapper default 2)."""
        return self._config_get(2)

    @enabledThreshold.setter
    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)

    @property
    def fillColor(self):
        """The 'fillColor' option (no wrapper default)."""
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def height(self):
        """The 'height' option (no wrapper default)."""
        return self._config_get(None)

    @height.setter
    def height(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineColor(self):
        """The 'lineColor' option (wrapper default '#ffffff')."""
        return self._config_get('#ffffff')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        """The 'lineWidth' option (wrapper default 0)."""
        return self._config_get(0)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def radius(self):
        """The 'radius' option (wrapper default 4)."""
        return self._config_get(4)

    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False)

    @property
    def width(self):
        """The 'width' option (no wrapper default)."""
        return self._config_get(None)

    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False)
def test():
    """Validate the exercise's two token-match patterns.

    pattern1 must be [{LOWER: 'iphone'}, {LOWER: 'x'}] and pattern2 must be
    [{LOWER: 'iphone'}, {IS_DIGIT: True}]; keys are accepted in either
    upper- or lower-case spelling.
    """
    assert (len(pattern1) == 2), 'pattern1'
    assert (len(pattern2) == 2), 'pattern2'
    assert (len(pattern1[0]) == 1), 'pattern1'
    assert any(((pattern1[0].get(l) == 'iphone') for l in ('LOWER', 'lower'))), 'pattern1`iphone`'
    assert (len(pattern1[1]) == 1), 'pattern1'
    assert any(((pattern1[1].get(l) == 'x') for l in ('LOWER', 'lower'))), 'pattern1`x`'
    assert (len(pattern2[0]) == 1), 'pattern2'
    assert any(((pattern2[0].get(l) == 'iphone') for l in ('LOWER', 'lower'))), 'pattern2`iphone`'
    assert (len(pattern2[1]) == 1), 'pattern2'
    assert any(((pattern2[1].get(l) == True) for l in ('IS_DIGIT', 'is_digit'))), 'pattern2'
    # Report success through the exercise's message helper.
    __msg__.good('!')
class HierarchyBase(object):
    """A hierarchy of meshes related by refinement.

    Iteration, len() and indexing expose only every
    refinements_per_level-th mesh as a "level"; all intermediate meshes are
    kept in self._meshes.

    NOTE(review): the bare `_property` lines in the original were garbled
    `@property` decorators (and would be NameErrors at class-definition time
    as written); they are restored on `comm` and `_comm`.
    """

    def __init__(self, meshes, coarse_to_fine_cells, fine_to_coarse_cells, refinements_per_level=1, nested=False):
        """
        :param meshes: full sequence of meshes, coarsest first.
        :param coarse_to_fine_cells: per-level coarse->fine cell maps.
        :param fine_to_coarse_cells: per-level fine->coarse cell maps.
        :param refinements_per_level: refinements between exposed levels.
        :param nested: whether the hierarchy is nested.
        """
        from firedrake_citations import Citations
        Citations().register('Mitchell2016')
        self._meshes = tuple(meshes)
        # Only every refinements_per_level-th mesh is exposed as a level.
        self.meshes = tuple(meshes[::refinements_per_level])
        self.coarse_to_fine_cells = coarse_to_fine_cells
        self.fine_to_coarse_cells = fine_to_coarse_cells
        self.refinements_per_level = refinements_per_level
        self.nested = nested
        # Intermediate meshes get fractional levels; exposed meshes then get
        # their integer level.
        for level, m in enumerate(meshes):
            set_level(m, self, Fraction(level, refinements_per_level))
        for level, m in enumerate(self):
            set_level(m, self, level)
        self._shared_data_cache = defaultdict(dict)

    @property
    def comm(self):
        """Communicator shared by all meshes; raises if they differ."""
        comm = self[0].comm
        if not all(m.comm == comm for m in self):
            raise NotImplementedError('All meshes in hierarchy must be on same communicator')
        return comm

    @property
    def _comm(self):
        """Internal communicator shared by all meshes; raises if they differ."""
        _comm = self[0]._comm
        if not all(m._comm == _comm for m in self):
            raise NotImplementedError('All meshes in hierarchy must be on same communicator')
        return _comm

    def __iter__(self):
        """Iterate from the coarsest to the finest exposed mesh."""
        for m in self.meshes:
            yield m

    def __len__(self):
        """Number of exposed levels."""
        return len(self.meshes)

    def __getitem__(self, idx):
        """Return the mesh at the given exposed level."""
        return self.meshes[idx]
class TransportAddressType(TextualConvention, Integer32):
    """SNMP TextualConvention: enumerated transport-domain type.

    Auto-generated pysnmp MIB code: an Integer32 constrained to the values
    0..16, each carrying a transport-domain name (udpIpv4, tcpIpv6, local, ...).
    """
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec
    subtypeSpec += ConstraintsUnion(SingleValueConstraint(*(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)))
    namedValues = NamedValues(*(('local', 13), ('sctpDns', 16), ('sctpIpv4', 9), ('sctpIpv4z', 11), ('sctpIpv6', 10), ('sctpIpv6z', 12), ('tcpDns', 15), ('tcpIpv4', 5), ('tcpIpv4z', 7), ('tcpIpv6', 6), ('tcpIpv6z', 8), ('udpDns', 14), ('udpIpv4', 1), ('udpIpv4z', 3), ('udpIpv6', 2), ('udpIpv6z', 4), ('unknown', 0)))
    # The human-readable DESCRIPTION is only attached when the MIB builder is
    # configured to load texts.
    if mibBuilder.loadTexts:
        description = 'A value that represents a transport domain. This is the enumerated version of\nthe transport domain registrations in this MIB module. The enumerated values\nhave the following meaning: unknown(0) unknown transport address type\nudpIpv4(1) transportDomainUdpIpv4 udpIpv6(2) transportDomainUdpIpv6 udpIpv4z(3)\ntransportDomainUdpIpv4z udpIpv6z(4) transportDomainUdpIpv6z tcpIpv4(5)\ntransportDomainTcpIpv4 tcpIpv6(6) transportDomainTcpIpv6 tcpIpv4z(7)\ntransportDomainTcpIpv4z tcpIpv6z(8) transportDomainTcpIpv6z sctpIpv4(9)\ntransportDomainSctpIpv4 sctpIpv6(10) transportDomainSctpIpv6 sctpIpv4z(11)\ntransportDomainSctpIpv4z sctpIpv6z(12) transportDomainSctpIpv6z local(13)\ntransportDomainLocal udpDns(14) transportDomainUdpDns tcpDns(15)\ntransportDomainTcpDns sctpDns(16) transportDomainSctpDns This textual\nconvention can be used to represent transport domains in situations where a\nsyntax of TransportDomain is unwieldy (for example, when used as an index). The\nusage of this textual convention implies that additional transport domains can\nonly be supported by updating this MIB module. This extensibility restriction\ndoes not apply for the TransportDomain textual convention which allows MIB\nauthors to define additional transport domains independently in other MIB\nmodules.\n'
class OptionSeriesLineSonificationDefaultinstrumentoptionsMappingLowpassResonance(Options):
    """Lowpass-filter resonance mapping options for line-series sonification.

    Each option is a property: the getter reads the configured value (with
    the wrapper default passed to _config_get); the setter writes via _config.

    NOTE(review): the @property / @*.setter decorators were evidently lost in
    extraction (each name was defined twice, the second def shadowing the
    first); they are restored here.
    """

    @property
    def mapFunction(self):
        """The 'mapFunction' option (no wrapper default)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """The 'mapTo' option (no wrapper default)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """The 'max' option (no wrapper default)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """The 'min' option (no wrapper default)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """The 'within' option (no wrapper default)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def _get_accel(callback: Union[(Accelerator, Callable, None)], display_name: Optional[str]) -> Tuple[(Optional[Accelerator], Optional[Callable], Optional[str])]:
    """Normalize a callback-or-Accelerator into (accelerator, callback, display_name).

    When an Accelerator is supplied, its callback is unwrapped and its
    helptext fills in a missing display_name; otherwise the inputs pass
    through with accelerator=None.
    """
    if not isinstance(callback, Accelerator):
        return None, callback, display_name
    accelerator = callback
    if display_name is None:
        display_name = accelerator.helptext
    return accelerator, accelerator.callback, display_name
def untangle(trees, cost_function=None, iterations=None, verbose=False):
    """Reorder children of internal nodes across a list of trees so that tip
    vertical positions agree between adjacent trees (tanglegram untangling).

    :param trees: tree objects exposing getExternal/getInternal/drawTree.
    :param cost_function: maps a (y1, y2) pair to a cost; defaults to squared
        vertical distance.
    :param iterations: number of untangling sweeps (default 3).
    :param verbose: print progress information.
    :returns: the same list of trees, modified in place.
    :raises RuntimeWarning: for nodes with >= 10 children, since the number
        of permutations explored grows factorially.

    Changes from the original: `== None` comparisons replaced with `is None`
    and `verbose == True` with `verbose` (idiom only; behavior unchanged).
    """
    from itertools import permutations
    if (iterations is None):
        iterations = 3
    if (cost_function is None):
        cost_function = (lambda pair: math.pow(abs((pair[0] - pair[1])), 2))
    # Cache tip y-positions per tree, keyed by tip name.
    y_positions = {T: {k.name: k.y for k in T.getExternal()} for T in trees}
    for iteration in range(iterations):
        if verbose:
            print(('Untangling iteration %d' % (iteration + 1)))
        # Compare each tree with the next, wrapping around at the end.
        first_trees = (list(range((len(trees) - 1))) + [(- 1)])
        next_trees = (list(range(1, len(trees))) + [0])
        for (cur, nex) in zip(first_trees, next_trees):
            tree1 = trees[cur]
            tree2 = trees[nex]
            if verbose:
                print(('%d vs %d' % (cur, nex)))
            # Visit internal nodes from shallowest to deepest.
            for k in sorted(tree2.getInternal(), key=(lambda branch: branch.height)):
                clade_y_positions = sorted([y_positions[tree2][tip] for tip in k.leaves])
                costs = {}
                if (len(k.children) >= 10):
                    raise RuntimeWarning('Node is too polytomic and untangling will take an astronomically long time')
                if verbose:
                    print(len(k.children))
                for permutation in permutations(k.children):
                    # Tip ordering induced by this child permutation.
                    clade_order = sum([([child.name] if child.is_leaf() else list(child.leaves)) for child in permutation], [])
                    new_y_positions = {clade_order[i]: clade_y_positions[i] for i in range(len(clade_y_positions))}
                    # Average cost over tips shared with tree1.
                    # NOTE(review): divides by len(tip_costs) — a clade sharing
                    # no tips with tree1 would raise ZeroDivisionError; confirm
                    # callers guarantee overlap.
                    tip_costs = list(map(cost_function, [(y_positions[tree1][tip], new_y_positions[tip]) for tip in clade_order if (tip in y_positions[tree1])]))
                    costs[permutation] = (sum(tip_costs) / len(tip_costs))
                # NOTE(review): sorting by -cost selects the *highest*-cost
                # permutation; verify this is intended (minimizing would sort
                # ascending by cost). Left unchanged.
                best = sorted(costs.keys(), key=(lambda w: (- costs[w])))[0]
                k.children = list(best)
            # Redraw so updated y coordinates can be cached for the next pass.
            tree2.drawTree()
            for k in tree2.getExternal():
                y_positions[tree2][k.name] = k.y
    return trees
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.