code stringlengths 281 23.7M |
|---|
def test_EquationBC_mixedpoisson_matrix_fieldsplit():
    """Convergence test for the mixed Poisson problem with EquationBC.

    Solves on two mesh resolutions with a Schur-complement fieldsplit
    preconditioner and checks the observed convergence rate is porder + 1.
    """
    mat_type = 'aij'
    eq_type = 'linear'
    porder = 2
    solver_parameters = {'mat_type': mat_type, 'ksp_type': 'fgmres', 'ksp_rtol': 1e-08, 'ksp_max_it': 200, 'pc_type': 'fieldsplit', 'pc_fieldsplit_type': 'schur', 'pc_fieldsplit_schur_fact_type': 'full', 'fieldsplit_0_ksp_type': 'preonly', 'fieldsplit_0_pc_type': 'lu', 'fieldsplit_1_ksp_type': 'cg', 'fieldsplit_1_pc_type': 'none'}
    mesh_sizes = [16, 32]
    err = []
    if eq_type == 'linear':
        err = [linear_poisson_mixed(solver_parameters, n, porder) for n in mesh_sizes]
    elif eq_type == 'nonlinear':
        err = [nonlinear_poisson_mixed(solver_parameters, n, porder) for n in mesh_sizes]
    # Halving h should reduce the error by ~2**(porder + 1).
    observed_rate = math.log2(err[0][0]) - math.log2(err[1][0])
    assert abs(observed_rate - (porder + 1)) < 0.05
def test_analytic_mass(u_v):
    """Check the assembled mass matrix against the known analytic one.

    Compares sorted eigenvalue spectra rather than raw entries, so the
    check is insensitive to DOF ordering.
    """
    u, v = u_v
    mass_form = inner(u, v) * dx
    assembled = assemble(mass_form).M.values
    a, b, c = 1 / 36, 1 / 72, 1 / 144
    analytic = np.asarray([
        [a, b, b, c, b, c],
        [b, a, c, b, c, b],
        [b, c, a, b, b, c],
        [c, b, b, a, c, b],
        [b, c, b, c, a, b],
        [c, b, c, b, b, a],
    ])
    assert np.allclose(sorted(np.linalg.eigvals(assembled)), sorted(np.linalg.eigvals(analytic)))
class SchemaCheck(object):
    """Validate a configuration dict against a voluptuous-style schema.

    On failure, raises ConfigurationError with the offending value and
    location extracted from the validation error.
    """

    def __init__(self, config, schema, test_what, location):
        """
        :param config: configuration dictionary to validate
        :param schema: callable schema (e.g. a voluptuous Schema)
        :param test_what: human-readable description of what is validated
        :param location: human-readable description of where config came from
        """
        self.loggit = logging.getLogger('curator.validators.SchemaCheck')
        self.loggit.debug('Schema: %s', schema)
        self.loggit.debug('"%s" config: %s', test_what, config)
        self.config = config
        self.schema = schema
        self.test_what = test_what
        self.location = location
        # Populated by __parse_error() when validation fails.
        self.badvalue = None
        self.error = None

    def __parse_error(self):
        """Resolve the value referenced by the error's trailing path string."""
        def get_badvalue(data_string, data):
            # data_string looks like "['key1']['key2'][0]"; strip quotes and
            # closing brackets, then split on '[' to get the path elements.
            elements = re.sub("[\\'\\]]", '', data_string).split('[')
            elements.pop(0)  # discard text before the first '['
            value = None
            for k in elements:
                try:
                    key = int(k)  # numeric -> list index
                except ValueError:
                    key = k       # otherwise a dict key
                if value is None:
                    value = data[key]
                else:
                    # BUGFIX: descend into nested containers; previously only
                    # the first path element was ever dereferenced, so nested
                    # error paths reported the wrong (top-level) value.
                    value = value[key]
            return value
        try:
            self.badvalue = get_badvalue(str(self.error).split()[-1], self.config)
        except Exception:
            self.badvalue = '(could not determine)'

    def result(self):
        """Return schema(config); raise ConfigurationError with details on failure."""
        try:
            return self.schema(self.config)
        except Exception as err:
            try:
                self.error = err.errors[0]
            except Exception:
                self.error = f'{err}'
            self.__parse_error()
            self.loggit.error('Schema error: %s', self.error)
            raise ConfigurationError(f'Configuration: {self.test_what}: Location: {self.location}: Bad Value: "{self.badvalue}", {self.error}. Check configuration file.') from err
def assemble_word_html_content(user_email, subscription, todays_words):
    """Render the HTML snippet for one subscriber's word-of-the-day e-mail.

    Fills word_template.html with the selected word (simplified or
    traditional per the subscription), its pinyin and definition, an
    example link, and quiz/review links built from the URL env var.
    Returns '' when no word is queued for the subscription's list.

    NOTE(review): todays_words appears to map list_id -> list of
    {'word': {...}} entries — confirm against the caller.
    """
    print('assembling word content...')
    print('list subscription: ', subscription)
    url = os.environ['URL']
    # First word queued for this subscription's list today.
    word = todays_words[subscription.list_id][0]['word']
    print('selected word, ', word)
    if (word is None):
        return ''
    else:
        if (subscription.character_set == 'simplified'):
            selected_word = word['simplified']
        else:
            selected_word = word['traditional']
        # NOTE(review): the two string literals below are garbled
        # (unterminated quotes) — the example-link URL prefixes were lost
        # in extraction; restore them from the source repository.
        if (subscription.list_name in ['HSK Level 1', 'HSK Level 2', 'HSK Level 3']):
            example_link = (' + selected_word)
        else:
            example_link = (' + selected_word)
        abs_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(abs_dir, 'word_template.html')) as fh:
            word_template = fh.read()
        # Last character of the list name, e.g. 'HSK Level 3' -> '3'.
        hsk_level = subscription.list_name[(- 1)]
        word_contents = word_template.replace('{word}', selected_word)
        word_contents = word_contents.replace('{pronunciation}', word['pinyin'])
        word_contents = word_contents.replace('{definition}', word['definition'])
        word_contents = word_contents.replace('{link}', example_link)
        word_contents = word_contents.replace('{list}', subscription.list_name)
        word_contents = word_contents.replace('{quiz_link}', (((f'{url}/quiz?list_id=' + subscription.list_id) + '&date_range=30&ques=10&char=') + subscription.character_set))
        word_contents = word_contents.replace('{review_link}', (((f'{url}/review?list_id=' + subscription.list_id) + '&date_range=30&char=') + subscription.character_set))
        return word_contents
def ray_tmp_dir(config: Dict[(Any, Any)], run_env: str) -> Generator[(Any, None, None)]:
    """Yield a fresh temp directory on the remote cluster; remove it afterwards."""
    raw = sdk.run_on_cluster(config, run_env=run_env, cmd='echo $(mktemp -d)', with_output=True).decode()
    # mktemp prints the new path; filter out noise and ray-config paths.
    candidates = [
        token for token in raw.strip().split()
        if token.startswith('/tmp/') and 'ray-config' not in token
    ]
    assert len(candidates) == 1, f'tmppath is : {candidates}'
    remote_dir = candidates[0]
    log.info(f'Created temp path on remote server {remote_dir}')
    yield remote_dir
    # Clean up once the caller is done with the directory.
    sdk.run_on_cluster(config, run_env=run_env, cmd=f'rm -rf {remote_dir}')
def test_encode_1_variable_with_counts(df_enc):
    """CountFrequencyEncoder('count') maps each var_A category to its frequency."""
    encoder = CountFrequencyEncoder(encoding_method='count', variables=['var_A'])
    transformed = encoder.fit_transform(df_enc)
    expected = df_enc.copy()
    # A appears 6 times, B 10 times, C 4 times in the fixture.
    expected['var_A'] = [6] * 6 + [10] * 10 + [4] * 4
    assert encoder.encoding_method == 'count'
    assert encoder.variables == ['var_A']
    assert encoder.variables_ == ['var_A']
    assert encoder.encoder_dict_ == {'var_A': {'A': 6, 'B': 10, 'C': 4}}
    assert encoder.n_features_in_ == 3
    pd.testing.assert_frame_equal(transformed, expected)
def test_default_values():
    """A field missing from the record round-trips as its schema default."""
    schema = {
        'type': 'record',
        'name': 'test_default_values',
        'fields': [{'name': 'default_field', 'type': 'string', 'default': 'default_value'}],
    }
    assert roundtrip(schema, [{}]) == [{'default_field': 'default_value'}]
def _message_handler(func: _C1, raw: _ce.CloudEvent) -> None:
    """Adapt a raw CloudEvent into a typed CloudEvent[StorageObjectData] and call func.

    Required storage fields are read with data[...] (KeyError if absent);
    optional metadata uses data.get(...). customerEncryption, when present,
    is unpacked into a CustomerEncryption value.
    """
    event_attributes = raw._get_attributes()
    data: _typing.Any = raw.get_data()
    # Map the raw storage payload onto the typed StorageObjectData model.
    message: StorageObjectData = StorageObjectData(bucket=data['bucket'], generation=data['generation'], id=data['id'], metageneration=data['metageneration'], name=data['name'], size=data['size'], storage_class=data['storageClass'], cache_control=data.get('cacheControl'), component_count=data.get('componentCount'), content_disposition=data.get('contentDisposition'), content_encoding=data.get('contentEncoding'), content_language=data.get('contentLanguage'), content_type=data.get('contentType'), crc32c=data.get('crc32c'), etag=data.get('etag'), kind=data.get('kind'), md5_hash=data.get('md5Hash'), media_link=data.get('mediaLink'), metadata=data.get('metadata'), self_link=data.get('selfLink'), time_created=data.get('timeCreated'), time_deleted=data.get('timeDeleted'), time_storage_class_updated=data.get('timeStorageClassUpdated'), updated=data.get('updated'), customer_encryption=(CustomerEncryption(encryption_algorithm=data['customerEncryption']['encryptionAlgorithm'], key_sha256=data['customerEncryption']['keySha256']) if (data.get('customerEncryption') is not None) else None))
    # 'subject' is optional; 'time' is parsed with an explicit tz offset format.
    event: CloudEvent[StorageObjectData] = CloudEvent(data=message, id=event_attributes['id'], source=event_attributes['source'], specversion=event_attributes['specversion'], subject=(event_attributes['subject'] if ('subject' in event_attributes) else None), time=_dt.datetime.strptime(event_attributes['time'], '%Y-%m-%dT%H:%M:%S.%f%z'), type=event_attributes['type'])
    func(event)
class TestInterpolateConverter(AITTestCase):
    """Converter tests for torch.nn.functional.interpolate."""

    # NOTE(review): the line below looks like the argument list of a stripped
    # decorator (e.g. @parameterized.expand([...])) — restore the decorator.
    ([param(scale_factor=1, mode='nearest'), param(scale_factor=2, mode='nearest'), param(scale_factor=2, mode='bilinear')])
    def test_interpolate(self, scale_factor, mode):
        """Lower a module wrapping interpolate and compare against eager output."""
        class TestModule(torch.nn.Module):
            def forward(self, y: torch.Tensor) -> torch.Tensor:
                x = torch.nn.functional.interpolate(y, scale_factor=scale_factor, mode=mode)
                return x
        model = TestModule().cuda().half()
        # NCHW half-precision input on GPU.
        inputs = [torch.randn([2, 8, 16, 16]).half().cuda()]
        self.run_test(model, inputs, expected_ops={acc_ops.interpolate})
def test_not_really_json_parsing():
    """Benchmark parsimonious on a toy JSON PEG grammar.

    Builds a large JSON document (60 copies of one object), parses it,
    and prints parsing throughput in KB/s.
    """
    father = '{\n    "id" : 1,\n    "married" : true,\n    "name" : "Larry Lopez",\n    "sons" : null,\n    "daughters" : [\n      {\n        "age" : 26,\n        "name" : "Sandra"\n      },\n      {\n        "age" : 25,\n        "name" : "Margaret"\n      },\n      {\n        "age" : 6,\n        "name" : "Mary"\n      }\n    ]\n  }'
    more_fathers = ','.join(([father] * 60))
    json = (('{"fathers" : [' + more_fathers) + ']}')
    # Simplified JSON grammar; the string rule skips escape handling.
    grammar = Grammar('\n    value = space (string / number / object / array / true_false_null)\n    space\n\n    object = "{" members "}"\n    members = (pair ("," pair)*)?\n    pair = string ":" value\n    array = "[" elements "]"\n    elements = (value ("," value)*)?\n    true_false_null = "true" / "false" / "null"\n\n    string = space "\\"" chars "\\"" space\n    chars = ~"[^\\"]*"  # TODO implement the real thing\n    number = (int frac exp) / (int exp) / (int frac) / int\n    int = "-"? ((digit1to9 digits) / digit)\n    frac = "." digits\n    exp = e digits\n    digits = digit+\n    e = "e+" / "e-" / "e" / "E+" / "E-" / "E"\n\n    digit1to9 = ~"[1-9]"\n    digit = ~"[0-9]"\n    space = ~"\\s*"\n    ')
    NUMBER = 1
    REPEAT = 5
    # timeit.repeat-style call; the setup lambda re-enables GC so timings
    # include garbage collection.
    total_seconds = min(repeat((lambda : grammar.parse(json)), (lambda : gc.enable()), repeat=REPEAT, number=NUMBER))
    seconds_each = (total_seconds / NUMBER)
    kb = (len(json) / 1024.0)
    print(('Took %.3fs to parse %.1fKB: %.0fKB/s.' % (seconds_each, kb, (kb / seconds_each))))
def test():
    """Evaluate `model` on `test_loader`, printing average NLL loss and accuracy.

    Relies on module-level `model`, `test_loader`, `args`, and `F`.
    """
    import torch  # local import: file-level imports are outside this chunk
    model.eval()
    test_loss = 0
    correct = 0
    # Inference only: no_grad() replaces the deprecated Variable(volatile=True).
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            # Flatten each 28x28 image into a 784-vector.
            data = data.view(-1, 28 * 28)
            output = model(data)
            # Sum the batch losses; reduction='sum' replaces the deprecated
            # size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        (100.0 * correct) / len(test_loader.dataset)))
class PriorityTab(QWidget):
    """Tab showing the priority queue in a table (title, tags, actions).

    Each row gets a remove button; removing only drops the item from the
    queue, it does not delete the underlying item.
    """

    def __init__(self, priority_list, parent):
        QWidget.__init__(self)
        self.parent = parent
        self.t_view = QTableView()
        # HTML delegate so title/tag cells can render rich markup.
        html_delegate = HTMLDelegate()
        model = self.get_model(priority_list)
        self.t_view.setItemDelegateForColumn(0, html_delegate)
        self.t_view.setItemDelegateForColumn(1, html_delegate)
        self.t_view.setModel(model)
        self.set_remove_btns(priority_list)
        self.t_view.resizeColumnsToContents()
        self.t_view.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)
        if ((priority_list is not None) and (len(priority_list) > 0)):
            # Stretch title/tags columns; keep the actions column compact.
            self.t_view.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.Stretch)
            self.t_view.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)
            self.t_view.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeMode.ResizeToContents)
        self.t_view.resizeRowsToContents()
        self.t_view.verticalHeader().setSectionsMovable(False)
        self.t_view.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)
        self.vbox = QVBoxLayout()
        lbl = QLabel("'Remove' will only remove the item from the queue, not delete it.")
        self.vbox.addWidget(lbl)
        self.vbox.addWidget(self.t_view)
        self.setLayout(self.vbox)
        if parent.dark_mode_used:
            self.setStyleSheet('\n                QHeaderView::section { background-color: #313233; color: white; }\n                QTableCornerButton::section {\n                background-color: #313233;\n                }\n            ')

    def on_remove_clicked(self, id):
        """Remove the row whose item data matches `id` and drop it from the queue."""
        row_len = self.t_view.model().rowCount()
        for r in range(row_len):
            n_id = self.t_view.model().item(r, 0).data()
            if (n_id == id):
                self.t_view.model().removeRow(r)
                remove_from_priority_list(n_id)
                break

    def get_model(self, priority_list):
        """Build the three-column model (title, tags, action placeholder)."""
        model = PriorityListModel(self)
        config = mw.addonManager.getConfig(__name__)
        # Tag colors depend on the active theme.
        if self.parent.dark_mode_used:
            tag_bg = config['styles.night.tagBackgroundColor']
            tag_fg = config['styles.night.tagForegroundColor']
        else:
            tag_bg = config['styles.tagBackgroundColor']
            tag_fg = config['styles.tagForegroundColor']
        for (c, pitem) in enumerate(priority_list):
            text = (pitem.title if ((pitem.title is not None) and (len(pitem.title.strip()) > 0)) else 'Untitled')
            text = ('<b>%s</b>' % text)
            tags = pitem.tags
            if ((tags is not None) and (len(tags.strip()) > 0)):
                # Render each space-separated tag as a colored pill span.
                tag_sep = (" </span> <span style='color: %s; background-color: %s; margin-right: 5px; border: none; border-radius: 5px;'> " % (tag_fg, tag_bg))
                tags = ("<span style='color: %s; background-color: %s; margin-right: 5px; border: none; border-radius: 5px;'> %s </span>" % (tag_fg, tag_bg, tag_sep.join([t for t in tags.split(' ') if (len(t) > 0)])))
            item = QStandardItem(text)
            # The item id is stored as row data so on_remove_clicked can find it.
            item.setData(QVariant(pitem.id))
            item.setFlags(((Qt.ItemIsEnabled | Qt.ItemIsSelectable) | Qt.ItemIsDragEnabled))
            model.setItem(c, 0, item)
            titem = QStandardItem(tags)
            titem.setFlags(((Qt.ItemIsEnabled | Qt.ItemIsSelectable) | Qt.ItemIsDragEnabled))
            model.setItem(c, 1, titem)
            # Empty cell; set_remove_btns() later places a button widget here.
            oitem = QStandardItem()
            oitem.setFlags(((Qt.ItemIsEnabled | Qt.ItemIsSelectable) | Qt.ItemIsDragEnabled))
            model.setItem(c, 2, oitem)
        model.setHeaderData(0, Qt.Horizontal, 'Title')
        model.setHeaderData(1, Qt.Horizontal, 'Tags')
        model.setHeaderData(2, Qt.Horizontal, 'Actions')
        return model

    def set_remove_btns(self, priority_list):
        """Place a ' - ' remove button in the actions column of every row."""
        for r in range(len(priority_list)):
            rem_btn = QToolButton()
            rem_btn.setText(' - ')
            rem_btn.clicked.connect(functools.partial(self.on_remove_clicked, priority_list[r].id))
            self.t_view.setIndexWidget(self.t_view.model().index(r, 2), rem_btn)
def json_path_to_examples(data_path, NLP):
    """Load a spaCy JSON training file and convert it into Example objects."""
    raw = srsly.read_json(data_path)
    docbin = DocBin()
    for doc in json_to_docs(raw):
        docbin.add(doc)
    # Round-trip through DocBin so docs are bound to NLP's vocab.
    vocab_docs = docbin.get_docs(NLP.vocab)
    return [Example(NLP.make_doc(doc.text), doc) for doc in vocab_docs]
class TestRestartFromFailure():
    """API tests for restarting an errored privacy request.

    NOTE(review): several decorator names were stripped in extraction —
    "(scope='function')" looks like @pytest.fixture(scope='function') and
    the bare "('fides...delay')" strings look like @mock.patch(...)
    decorators (consistent with the submit_mock first parameter). Restore
    them from the source repository.
    """

    (scope='function')
    def url(self, db, privacy_request):
        """Retry endpoint URL for the fixture privacy request."""
        return (V1_URL_PREFIX + PRIVACY_REQUEST_RETRY.format(privacy_request_id=privacy_request.id))

    def test_restart_from_failure_not_authenticated(self, api_client, url):
        """No auth header -> 401."""
        response = api_client.post(url, headers={})
        assert (response.status_code == 401)

    def test_restart_from_failure_wrong_scope(self, api_client, url, generate_auth_header):
        """Read-only scope cannot restart -> 403."""
        auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_READ])
        response = api_client.post(url, headers=auth_header)
        assert (response.status_code == 403)

    def test_restart_from_failure_not_errored(self, api_client, url, generate_auth_header, privacy_request):
        """Only errored requests may be restarted -> 400 for in_processing."""
        auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])
        response = api_client.post(url, headers=auth_header)
        assert (response.status_code == 400)
        assert (response.json()['detail'] == f"Cannot restart privacy request from failure: privacy request '{privacy_request.id}' status = in_processing.")

    ('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
    def test_restart_from_failure_no_stopped_step(self, submit_mock, api_client, url, generate_auth_header, db, privacy_request):
        """With no cached checkpoint, the request restarts from the beginning."""
        auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])
        privacy_request.status = PrivacyRequestStatus.error
        privacy_request.save(db)
        response = api_client.post(url, headers=auth_header)
        assert (response.status_code == 200)
        db.refresh(privacy_request)
        assert (privacy_request.status == PrivacyRequestStatus.in_processing)
        submit_mock.assert_called_with(privacy_request_id=privacy_request.id, from_step=None, from_webhook_id=None)
        privacy_request.delete(db)

    ('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
    def test_restart_from_failure_from_specific_collection(self, submit_mock, api_client, url, generate_auth_header, db, privacy_request):
        """A cached access-step checkpoint is passed through to the runner."""
        auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])
        privacy_request.status = PrivacyRequestStatus.error
        privacy_request.save(db)
        privacy_request.cache_failed_checkpoint_details(step=CurrentStep.access, collection=CollectionAddress('test_dataset', 'test_collection'))
        response = api_client.post(url, headers=auth_header)
        assert (response.status_code == 200)
        db.refresh(privacy_request)
        assert (privacy_request.status == PrivacyRequestStatus.in_processing)
        submit_mock.assert_called_with(privacy_request_id=privacy_request.id, from_step=CurrentStep.access.value, from_webhook_id=None)

    ('fides.api.service.privacy_request.request_runner_service.run_privacy_request.delay')
    def test_restart_from_failure_outside_graph(self, submit_mock, api_client, url, generate_auth_header, db, privacy_request):
        """Checkpoints outside the graph (email_post_send) also restart correctly."""
        auth_header = generate_auth_header(scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])
        privacy_request.status = PrivacyRequestStatus.error
        privacy_request.save(db)
        privacy_request.cache_failed_checkpoint_details(step=CurrentStep.email_post_send, collection=None)
        response = api_client.post(url, headers=auth_header)
        assert (response.status_code == 200)
        db.refresh(privacy_request)
        assert (privacy_request.status == PrivacyRequestStatus.in_processing)
        submit_mock.assert_called_with(privacy_request_id=privacy_request.id, from_step=CurrentStep.email_post_send.value, from_webhook_id=None)
def on_message(client, userdata, msg):
    """MQTT callback: rescale an incoming numeric payload and store it.

    Ignores broker-internal $SYS topics. The topic becomes the key
    ('/' -> '.', lowercased, optionally prefixed); the payload is rescaled
    and written to the patch. Non-numeric payloads are skipped.
    """
    if msg.topic.startswith('$SYS'):
        return
    try:
        key = msg.topic.replace('/', '.').lower()
        if len(prefix):
            key = ('%s.%s' % (prefix, key))
        val = EEGsynth.rescale(float(msg.payload), slope=output_scale, offset=output_offset)
        monitor.update(key, val)
        patch.setvalue(key, val)
    except (ValueError, TypeError):
        # Non-numeric payload: keep the deliberate best-effort behavior, but
        # only swallow conversion errors — the previous bare `except:` also
        # hid KeyboardInterrupt/SystemExit and genuine bugs.
        pass
class Paginator(object):
    """Apply limit/offset/ordering to a SQLAlchemy query.

    Defaults: no limit, offset 0, order by `id`. When order_type is not
    given, ordering by `id` defaults to DESC and by `name` to ASC.
    """
    LIMIT = None
    OFFSET = 0
    ORDER = 'id'

    def __init__(self, query, model, limit=None, offset=None, order=None, order_type=None, **kwargs):
        self.query = query
        self.model = model
        self.limit = limit or self.LIMIT
        self.offset = offset or self.OFFSET
        self.order = order or self.ORDER
        self.order_type = order_type
        if not self.order_type:
            # Sensible defaults: newest first for ids, alphabetical for names.
            self.order_type = {'id': 'DESC', 'name': 'ASC'}.get(self.order, self.order_type)

    def get(self):
        """Return the paginated query."""
        return self.paginate_query(self.query)

    def paginate_query(self, query):
        """Attach ORDER BY / LIMIT / OFFSET; reject unknown order columns."""
        order_attr = getattr(self.model, self.order, None)
        if not order_attr:
            msg = "Cannot order by {}, {} doesn't have such property".format(self.order, self.model.__tablename__)
            raise CoprHttpException(msg)
        if not isinstance(order_attr, InstrumentedAttribute):
            raise CoprHttpException('Cannot order by {}'.format(self.order))
        direction = {'ASC': sqlalchemy.asc, 'DESC': sqlalchemy.desc}.get(self.order_type, lambda col: col)
        return query.order_by(direction(order_attr)).limit(self.limit).offset(self.offset)

    def meta(self):
        """Pagination parameters as a dict (for API responses)."""
        return {key: getattr(self, key) for key in ['limit', 'offset', 'order', 'order_type']}

    def map(self, fun):
        """Apply fun to every row of the paginated result."""
        return [fun(row) for row in self.get()]

    def to_dict(self):
        """Serialize every row of the paginated result."""
        return [row.to_dict() for row in self.get()]
# NOTE(review): the bare "(scope='function')" below looks like a stripped
# @pytest.fixture(scope='function') decorator — restore it.
(scope='function')
def privacy_request_status_canceled(db: Session, policy: Policy) -> PrivacyRequest:
    """Yield a canceled PrivacyRequest fixture; deletes it on teardown."""
    privacy_request = _create_privacy_request_for_policy(db, policy, PrivacyRequestStatus.canceled)
    # Canceled before processing ever started.
    privacy_request.started_processing_at = None
    privacy_request.save(db)
    (yield privacy_request)
    privacy_request.delete(db)
def mock_module_disabled(self, cmd):
    """Fake subprocess result simulating a disabled kernel module.

    'modprobe' queries report the module aliased to /bin/true (disabled);
    'lsmod' reports it not loaded (returncode 1). Returns a SimpleNamespace
    mimicking subprocess.CompletedProcess.
    """
    if 'modprobe' in cmd:
        output = ['install /bin/true ']
        error = ['']
        returncode = 0
    elif 'lsmod' in cmd:
        output = ['']
        error = ['']
        returncode = 1
    else:
        # BUGFIX: an unrecognized command previously raised UnboundLocalError
        # on `output`; return an empty successful result instead.
        output = ['']
        error = ['']
        returncode = 0
    return SimpleNamespace(stdout=output, stderr=error, returncode=returncode)
def _discover_minimum_mtu_to_target(address, port):
    """Probe the usable datagram MTU toward (address, port).

    Sends probe datagrams with DF set (IP_MTU_DISCOVER_DO) for each size in
    MTU_ATTEMPTS and returns the first size accepted by the kernel; falls
    back to WORST_CASE_MTU_IP when none succeeds.

    BUGFIX: the socket was previously never closed (leaked fd); it is now
    managed with a `with` block.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect((address, port))
        # Set the don't-fragment behavior so oversized sends fail locally.
        s.setsockopt(socket.IPPROTO_IP, IP_MTU_DISCOVER, IP_MTU_DISCOVER_DO)
        for attempt in MTU_ATTEMPTS:
            try:
                s.send(b'#' * (attempt - DATAGRAM_HEADER_LENGTH_IN_BYTES))
                return attempt
            except socket.error as e:
                # 'Message too long' means this size exceeds the path MTU;
                # any other error aborts probing.
                if 'too long' not in e.strerror:
                    break
    return WORST_CASE_MTU_IP
# NOTE(review): the bare ".django_db" below looks like a stripped
# @pytest.mark.django_db decorator — restore it.
.django_db
def test_missing_spending_type(client, monkeypatch, generic_account_data, helpers):
    """Omitting 'spending_type' must yield 422 with a clear error message."""
    helpers.patch_datetime_now(monkeypatch, 2022, 12, 31)
    resp = helpers.post_for_spending_endpoint(client, url, def_codes=['A'])
    assert (resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY)
    assert (resp.data['detail'] == "Missing value: 'spending_type' is a required field")
def mock_audit_events_for_successful_file_system_mounts_are_collected_pass(self, cmd):
    """Fake audit-rule listing for the mount-auditing compliance check.

    'auditctl' output uses the loaded-rule form (key=mounts); anything else
    gets the on-disk rule-file form (-k mounts). Returns a SimpleNamespace
    mimicking subprocess.CompletedProcess.
    """
    if 'auditctl' in cmd:
        stdout = [
            '-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=-1 -F key=mounts',
            '-a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=-1 -F key=mounts',
        ]
    else:
        stdout = [
            '-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!= -k mounts',
            '-a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!= -k mounts',
        ]
    return SimpleNamespace(returncode=0, stderr=[''], stdout=stdout)
# NOTE(review): the bare ".django_db" below looks like a stripped
# @pytest.mark.django_db decorator — restore it.
.django_db
def test_budget_function_list_sort_by_obligated_amount(client, monkeypatch, agency_account_data, helpers):
    """Results must sort by obligated_amount in both asc and desc order."""
    helpers.mock_current_fiscal_year(monkeypatch)
    # Ascending: NAME 5 (10.0) < NAME 6 (100.0) < NAME 1 (111.0).
    query_params = f'?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=asc&sort=obligated_amount'
    resp = client.get(url.format(code='007', query_params=query_params))
    expected_result = {'fiscal_year': helpers.get_mocked_current_fiscal_year(), 'toptier_code': '007', 'messages': [], 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'next': None, 'page': 1, 'previous': None, 'total': 3, 'limit': 10}, 'results': [{'gross_outlay_amount': 1000000.0, 'name': 'NAME 5', 'obligated_amount': 10.0, 'children': [{'gross_outlay_amount': 1000000.0, 'name': 'NAME 5A', 'obligated_amount': 10.0}]}, {'gross_outlay_amount': 100000.0, 'name': 'NAME 6', 'obligated_amount': 100.0, 'children': [{'gross_outlay_amount': 100000.0, 'name': 'NAME 6A', 'obligated_amount': 100.0}]}, {'gross_outlay_amount': .0, 'name': 'NAME 1', 'obligated_amount': 111.0, 'children': [{'gross_outlay_amount': .0, 'name': 'NAME 1A', 'obligated_amount': 111.0}]}]}
    assert (resp.status_code == status.HTTP_200_OK)
    assert (resp.json() == expected_result)
    # Descending: same rows, reversed.
    query_params = f'?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=desc&sort=obligated_amount'
    resp = client.get(url.format(code='007', query_params=query_params))
    expected_result = {'fiscal_year': helpers.get_mocked_current_fiscal_year(), 'toptier_code': '007', 'messages': [], 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'next': None, 'page': 1, 'previous': None, 'total': 3, 'limit': 10}, 'results': [{'gross_outlay_amount': .0, 'name': 'NAME 1', 'obligated_amount': 111.0, 'children': [{'gross_outlay_amount': .0, 'name': 'NAME 1A', 'obligated_amount': 111.0}]}, {'gross_outlay_amount': 100000.0, 'name': 'NAME 6', 'obligated_amount': 100.0, 'children': [{'gross_outlay_amount': 100000.0, 'name': 'NAME 6A', 'obligated_amount': 100.0}]}, {'gross_outlay_amount': 1000000.0, 'name': 'NAME 5', 'obligated_amount': 10.0, 'children': [{'gross_outlay_amount': 1000000.0, 'name': 'NAME 5A', 'obligated_amount': 10.0}]}]}
    assert (resp.status_code == status.HTTP_200_OK)
    assert (resp.json() == expected_result)
class TestTask(GymTestCase):
    """Unit tests for the Gym task lifecycle (setup/execute/teardown)."""

    def test__init__(self):
        """Constructor stores nb_steps and starts with training off."""
        assert (self.task.nb_steps == self.nb_steps)
        assert (self.task.is_rl_agent_training is False)

    def test_properties(self):
        """proxy_env / proxy_env_queue expose the private proxy env and its queue."""
        assert (self.task.proxy_env == self.task._proxy_env)
        assert (self.task.proxy_env_queue == self.task._proxy_env.queue)

    def test_setup(self):
        """setup() logs its invocation."""
        with patch.object(self.logger, 'info') as mock_logger:
            self.task.setup()
            mock_logger.assert_any_call('Gym task: setup method called.')

    def test_execute_i(self):
        """First execute(): starts training, fits the agent, flips the flag."""
        self.task.proxy_env._is_rl_agent_trained = False
        self.task.is_rl_agent_training = False
        with patch.object(self.task._rl_agent, 'fit') as mock_fit:
            with patch.object(self.logger, 'info') as mock_logger:
                self.task.execute()
                mock_logger.assert_any_call('Training starting ...')
                assert (self.task.is_rl_agent_training is True)
                mock_fit.assert_called_with(self.task.proxy_env, self.nb_steps)
                mock_logger.assert_any_call('Training finished. You can exit now via CTRL+C.')

    def test_execute_ii(self):
        """execute() after training: closes the env and clears the flag."""
        self.task.proxy_env._is_rl_agent_trained = True
        self.task.is_rl_agent_training = True
        with patch.object(self.task.proxy_env, 'close') as mock_close:
            self.task.execute()
            assert (self.task.is_rl_agent_training is False)
            mock_close.assert_called_once()

    def test_teardown(self):
        """teardown() logs, stops training, and closes the env."""
        self.task.is_rl_agent_training = True
        with patch.object(self.task.proxy_env, 'close') as mock_close:
            with patch.object(self.logger, 'info') as mock_logger:
                self.task.teardown()
                mock_logger.assert_any_call('Gym Task: teardown method called.')
                assert (self.task.is_rl_agent_training is False)
                mock_close.assert_called_once()
class Migration(migrations.Migration):
    """Alter theme favicon/logo FileFields: validators now accept
    gif/ico/jpg/jpeg/png/svg (favicon) and gif/jpg/jpeg/png/svg (logo)."""

    dependencies = [('admin_interface', '0020_module_selected_colors')]
    operations = [migrations.AlterField(model_name='theme', name='favicon', field=models.FileField(blank=True, help_text='(.ico|.png|.gif - 16x16|32x32 px)', upload_to='admin-interface/favicon/', validators=[FileExtensionValidator(allowed_extensions=['gif', 'ico', 'jpg', 'jpeg', 'png', 'svg'])], verbose_name='favicon')), migrations.AlterField(model_name='theme', name='logo', field=models.FileField(blank=True, help_text='Leave blank to use the default Django logo', upload_to='admin-interface/logo/', validators=[FileExtensionValidator(allowed_extensions=['gif', 'jpg', 'jpeg', 'png', 'svg'])], verbose_name='logo'))]
def extractBallKickingGangBoss(item):
    """Release-title parser for the 'Ball Kicking Gang Boss' feed.

    Returns None for previews or untitled chapters, a release message for
    recognized tags, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol or frag):
        return None
    if 'jinsei' in item['tags']:
        return buildReleaseMessageWithType(item, "I'll Live My Second Life!", vol, chp, frag=frag, postfix=postfix)
    return False
def setUpModule():
    """Create the two Practice fixtures shared by every test in this module."""
    Practice.objects.create(code='N84014', name='AINSDALE VILLAGE SURGERY', address1='THE SURGERY', address2='2 LEAMINGTON RD AINSDALE', address3='SOUTHPORT', address4='MERSEYSIDE', postcode='PR8 3LB')
    Practice.objects.create(code='G82650', name='MOCKETTS WOOD SURGERY', address1="THE MOCKETT'S WOOD SURG.", address2='HOPEVILLE AVE ST PETERSY', address3='BROADSTAIRS', address4='KENT', postcode='CT10 2TR')
class Serializer(BaseSerializer, _Serializer, DRFSerializer):
    """Async-aware DRF serializer: awaits async field representations while
    delegating plain DRF fields to their synchronous to_representation."""

    # NOTE(review): the bare "_property" below looks like a mangled
    # @property decorator — restore it.
    _property
    async def adata(self):
        """Async counterpart of .data, wrapped in a ReturnDict."""
        ret = (await super().adata)
        return ReturnDict(ret, serializer=self)

    async def ato_representation(self, instance):
        """Async counterpart of to_representation for a single instance."""
        ret = OrderedDict()
        fields = self._readable_fields
        for field in fields:
            try:
                attribute = field.get_attribute(instance)
            except SkipField:
                continue
            # Built-in DRF model fields are synchronous; everything else is
            # assumed to provide an async ato_representation.
            is_drf_field = (type(field) in DRFModelSerializer.serializer_field_mapping.values())
            # For FK attributes, treat a None pk the same as a None value.
            check_for_none = (attribute.pk if isinstance(attribute, models.Model) else attribute)
            if (check_for_none is None):
                ret[field.field_name] = None
            else:
                if is_drf_field:
                    repr = field.to_representation(attribute)
                else:
                    repr = (await field.ato_representation(attribute))
                ret[field.field_name] = repr
        return ret
class SpecialConv2dBiasAct(Module):
    """Wrapper module for a fused conv2d + bias + activation op.

    When auto_padding is enabled, in_channels is rounded up to 4 (if < 4)
    or 8 (if strictly between 4 and 8) — presumably to satisfy alignment
    requirements of the fused backend op; confirm against the ops package.
    """

    def __init__(self, op_name, in_channels, out_channels, kernel_size, stride, padding=0, dilation=1, auto_padding=True, dtype='float16'):
        super().__init__()
        if (auto_padding and (in_channels < 4)):
            in_channels = 4
        elif (auto_padding and (in_channels > 4) and (in_channels < 8)):
            in_channels = 8
        # Weight layout: [out_channels, kH, kW, in_channels].
        self.weight = Parameter(shape=[out_channels, kernel_size, kernel_size, in_channels], dtype=dtype)
        self.bias = Parameter(shape=[out_channels], dtype=dtype)
        # Resolve the fused op by name, e.g. a conv2d_bias_act variant.
        op_func = getattr(ops, op_name)
        self.op = op_func(stride=stride, pad=padding, dilate=dilation, auto_padding=auto_padding)

    def forward(self, *args):
        """Apply the fused op to exactly one input tensor."""
        assert (len(args) == 1)
        x = args[0]
        return self.op(x, self.weight.tensor(), self.bias.tensor())
def test_call_with_init_positional_args():
    """Factory passes its positional args to every new instance."""
    provider = providers.Factory(Example, 'i1', 'i2')
    first = provider()
    second = provider()
    for obj in (first, second):
        assert isinstance(obj, Example)
        assert obj.init_arg1 == 'i1'
        assert obj.init_arg2 == 'i2'
    # Each call must produce a distinct instance.
    assert first is not second
# NOTE(review): the line below looks like the argument list of a stripped
# router decorator (e.g. @router.get(STORAGE_DEFAULT, ...)) — restore it.
(STORAGE_DEFAULT, dependencies=[Security(verify_oauth_client, scopes=[STORAGE_READ])], response_model=Page[StorageDestinationResponse])
def get_default_configs(*, db: Session=Depends(deps.get_db), params: Params=Depends()) -> AbstractPage[StorageConfig]:
    """Paginated list of storage configs flagged is_default, newest first."""
    logger.info('Finding default storage configurations with pagination params {}', params)
    return paginate(db.query(StorageConfig).filter_by(is_default=True).order_by(StorageConfig.created_at.desc()), params=params)
class LedgerApiHandler(Handler):
    """Handle ledger_api messages and record the latest block into shared state."""

    SUPPORTED_PROTOCOL = LedgerApiMessage.protocol_id

    def setup(self) -> None:
        """Set up the handler (nothing to do)."""

    def handle(self, message: Message) -> None:
        """Dispatch an incoming ledger_api message by performative."""
        self.context.logger.info('Handling ledger api msg')
        ledger_api_msg = cast(LedgerApiMessage, message)
        ledger_api_dialogues = cast(LedgerApiDialogues, self.context.ledger_api_dialogues)
        ledger_api_dialogue = cast(Optional[LedgerApiDialogue], ledger_api_dialogues.update(ledger_api_msg))
        if (ledger_api_dialogue is None):
            # Message does not belong to any known dialogue.
            self._handle_unidentified_dialogue(ledger_api_msg)
            return
        if (ledger_api_msg.performative is LedgerApiMessage.Performative.STATE):
            self._handle_state(ledger_api_msg)
        elif (ledger_api_msg.performative == LedgerApiMessage.Performative.ERROR):
            self._handle_error(ledger_api_msg, ledger_api_dialogue)
        else:
            self._handle_invalid(ledger_api_msg, ledger_api_dialogue)

    def teardown(self) -> None:
        """Tear down the handler (nothing to do)."""

    def _handle_unidentified_dialogue(self, ledger_api_msg: LedgerApiMessage) -> None:
        """Log a message that could not be matched to a dialogue."""
        self.context.logger.info('received invalid ledger_api message={}, unidentified dialogue.'.format(ledger_api_msg))

    def _handle_state(self, ledger_api_msg: LedgerApiMessage) -> None:
        """Extract the block height from a STATE message and publish the observation."""
        self.context.logger.debug(f'Handling ledger API message: {ledger_api_msg}')
        block_info = ledger_api_msg.state.body
        # Height lives at block.header.height in the state body, as a string.
        block_height_str = block_info.get('block', {}).get('header', {}).get('height', None)
        if block_height_str:
            block_height = int(block_height_str)
        else:
            block_height = None
        if (block_height is None):
            self.context.logger.info('block height not present')
        else:
            self.context.logger.info(('Retrieved latest block: ' + str({'block_height': block_height})))
            # Other skill components read the latest block from shared state.
            self.context.shared_state['observation'] = {'block': block_info}

    def _handle_error(self, ledger_api_msg: LedgerApiMessage, ledger_api_dialogue: LedgerApiDialogue) -> None:
        """Log an ERROR performative."""
        self.context.logger.info('received ledger_api error message={} in dialogue={}.'.format(ledger_api_msg, ledger_api_dialogue))

    def _handle_invalid(self, ledger_api_msg: LedgerApiMessage, ledger_api_dialogue: LedgerApiDialogue) -> None:
        """Warn about a performative this handler does not support."""
        self.context.logger.warning('cannot handle ledger_api message of performative={} in dialogue={}.'.format(ledger_api_msg.performative, ledger_api_dialogue))
class OptionTooltipDatetimelabelformats(Options):
    """Tooltip dateTimeLabelFormats options: one property per time unit.

    Each getter returns the configured format string (falling back to the
    Highcharts default passed to _config_get); each setter stores the value.

    NOTE(review): each getter/setter pair was emitted as two same-named plain
    methods, so the setter silently shadowed the getter in the class body.
    Restored the standard @property / @<name>.setter pairing.
    """

    @property
    def day(self):
        return self._config_get('%A, %e %b %Y')

    @day.setter
    def day(self, text: str):
        self._config(text, js_type=False)

    @property
    def hour(self):
        return self._config_get('%A, %e %b, %H:%M')

    @hour.setter
    def hour(self, text: str):
        self._config(text, js_type=False)

    @property
    def millisecond(self):
        return self._config_get('%A, %e %b, %H:%M:%S.%L')

    @millisecond.setter
    def millisecond(self, text: str):
        self._config(text, js_type=False)

    @property
    def minute(self):
        return self._config_get('%A, %e %b, %H:%M')

    @minute.setter
    def minute(self, text: str):
        self._config(text, js_type=False)

    @property
    def month(self):
        return self._config_get('%B %Y')

    @month.setter
    def month(self, text: str):
        self._config(text, js_type=False)

    @property
    def second(self):
        return self._config_get('%A, %e %b, %H:%M:%S')

    @second.setter
    def second(self, text: str):
        self._config(text, js_type=False)

    @property
    def week(self):
        return self._config_get('Week from %A, %e %b %Y')

    @week.setter
    def week(self, text: str):
        self._config(text, js_type=False)

    @property
    def year(self):
        return self._config_get('%Y')

    @year.setter
    def year(self, text: str):
        self._config(text, js_type=False)
def test_inverse_transform_when_ignore_unseen():
    """inverse_transform maps categories unseen at fit time back to NaN."""
    train = pd.DataFrame({'words': ['dog', 'dog', 'cat', 'cat', 'cat', 'bird']})
    unseen_df = pd.DataFrame({'words': ['dog', 'dog', 'cat', 'cat', 'cat', 'frog']})
    expected = pd.DataFrame({'words': ['dog', 'dog', 'cat', 'cat', 'cat', nan]})
    target = [1, 0, 1, 0, 1, 0]
    enc = MeanEncoder(unseen='ignore')
    enc.fit(train, target)
    encoded = enc.transform(unseen_df)
    pd.testing.assert_frame_equal(enc.inverse_transform(encoded), expected)
# NOTE(review): the bare "('dalton')" below looks like the argument of a
# stripped decorator (e.g. @using('dalton')) — restore it.
('dalton')
def test_h2o_opt():
    """Optimize water with Dalton/3-21G; expect gau_tight convergence in
    4 cycles to an energy of ~-75.0 au."""
    geom = geom_loader('lib:h2o.xyz', coord_type='redund')
    calc = Dalton(basis='3-21G')
    geom.set_calculator(calc)
    opt = RFOptimizer(geom, thresh='gau_tight')
    opt.run()
    assert opt.is_converged
    assert (opt.cur_cycle == 4)
    assert (geom.energy == pytest.approx((- 75.)))
def server_functional(host, port, dbtype=DB_TYPE_HMM, qtype=QUERY_TYPE_SEQ):
    """Return True when the server at host:port answers a trivial query.

    False when the server is down, the query type is unrecognized, or the
    test query raises.
    """
    if not server_up(host, port):
        return False
    try:
        if qtype == QUERY_TYPE_SEQ:
            get_hits('test', 'TESTSEQ', host, port, dbtype, qtype=qtype)
        elif qtype == QUERY_TYPE_HMM:
            get_hits('test', test_hmm, host, port, dbtype, qtype=qtype)
        else:
            raise Exception(f'Unrecognized qtype: {qtype}')
    except Exception:
        # Any failure of the probe query means the server is not functional.
        return False
    return True
def extractLovexsweetWordpressCom(item):
    """Map a release post to a structured release message, or None/False."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and posts with neither a chapter nor a volume number.
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    tagmap = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in tagmap.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class Product(TestCase, Common):
    """Common checks applied to elementseq._Product of mixed references."""

    def setUp(self):
        # Product of a 2D sequence (square, triangle) with a uniform 1D line
        # sequence of length 3; expected items follow product order.
        self.seq = nutils.elementseq._Product(nutils.elementseq.References.from_iter([square, triangle], 2), nutils.elementseq.References.uniform(line, 3))
        self.check = (([(square * line)] * 3) + ([(triangle * line)] * 3))
        self.checkndims = 3
        super().setUp()
class Reject(InstantSpellCardAction):
    """Instant spell card that counters another spell-card action."""

    def __init__(self, source, target_act):
        self.source = source
        self.target_act = target_act
        self.target = target_act.target

    def apply_action(self):
        # Only spell-card actions can be rejected; anything else is a no-op.
        if isinstance(self.target_act, SpellCardAction):
            self.target_act.cancelled = True
            return True
        return False
class TestSparseMaskChannel():
    """Tests for the sparse-mask federated-learning channel.

    NOTE(review): the bare '.parametrize(...)' lines below look like stripped
    '@pytest.mark.parametrize(...)' decorators — confirm against the original.
    """

    def test_sparse_model_size(self) -> None:
        # Globally prune 75% of all weights/biases and verify measured sparsity.
        model = FCModel()
        params_to_prune = [(model.fc1, 'weight'), (model.fc1, 'bias'), (model.fc2, 'weight'), (model.fc2, 'bias'), (model.fc3, 'weight'), (model.fc3, 'bias')]
        prune.global_unstructured(params_to_prune, pruning_method=prune.L1Unstructured, amount=0.75)
        for (module, name) in params_to_prune:
            # Make the pruning permanent (drop the reparametrization).
            prune.remove(module, name)
        sparsity = utils.calc_model_sparsity(model.state_dict())
        assertAlmostEqual(0.75, sparsity, delta=0.02)

    .parametrize('config', [SparseMaskChannelConfig(proportion_of_zero_weights=0.6, sparsity_method='topk'), SparseMaskChannelConfig(proportion_of_zero_weights=0.6, sparsity_method='random')])
    .parametrize('expected_type', [SparseMaskChannel])
    def test_random_mask_instantiation(self, config: Type, expected_type: Type) -> None:
        # Hydra-style instantiation produces the expected channel type.
        channel = instantiate(config)
        assertIsInstance(channel, expected_type)

    .parametrize('config', [SparseMaskChannelConfig(proportion_of_zero_weights=0.6, sparsity_method='topk'), SparseMaskChannelConfig(proportion_of_zero_weights=0.6, sparsity_method='random')])
    .parametrize('expected_type', [SparseMaskChannel])
    def test_random_mask_server_to_client(self, config: Type, expected_type: Type) -> None:
        # Server-to-client direction must leave the model untouched.
        channel = instantiate(config)
        two_fc = utils.TwoFC()
        base_model = utils.SampleNet(two_fc)
        download_model = FLModelParamUtils.clone(base_model)
        message = Message(download_model)
        message = channel.server_to_client(message)
        mismatched = FLModelParamUtils.get_mismatched_param([base_model.fl_get_module(), download_model.fl_get_module()])
        assertEqual(mismatched, '', mismatched)

    .parametrize('config', [SparseMaskChannelConfig(proportion_of_zero_weights=0.6, sparsity_method='topk'), SparseMaskChannelConfig(proportion_of_zero_weights=0.6, sparsity_method='random')])
    .parametrize('expected_type', [SparseMaskChannel])
    def test_sparse_mask_client_to_server(self, config: Type, expected_type: Type) -> None:
        # Client-to-server direction sparsifies to the configured proportion.
        channel = instantiate(config)
        two_fc = utils.TwoFC()
        upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
        message = Message(upload_model)
        message = channel.client_to_server(message)
        sparsity = utils.calc_model_sparsity(message.model_state_dict)
        assertAlmostEqual(channel.cfg.proportion_of_zero_weights, sparsity, delta=0.05)

    .parametrize('config', [SparseMaskChannelConfig(proportion_of_zero_weights=0.6, sparsity_method='topk')])
    .parametrize('expected_type', [SparseMaskChannel])
    def test_topk_mask_sparsity(self, config: Type, expected_type: Type) -> None:
        # Top-k masking zeroes exactly the smallest-magnitude entries.
        channel = instantiate(config)
        two_fc = utils.TwoFC()
        base_model = utils.SampleNet(two_fc)
        upload_model = FLModelParamUtils.clone(base_model)
        message = Message(upload_model)
        message = channel.client_to_server(message)
        for (name, p) in base_model.fl_get_module().named_parameters():
            flattened_params = p.flatten().abs()
            # Indices of the smallest |values| — these must have been masked.
            sparse_indices = flattened_params.abs().argsort()[:int((config.proportion_of_zero_weights * flattened_params.numel()))]
            flattened_message_params = torch.cat([torch.flatten(p) for p in message.model_state_dict[name]]).flatten()
            assertEqual(flattened_message_params[sparse_indices].sum(), 0.0)

    .parametrize('sparsity_method', ['topk', 'random'])
    .parametrize('compressed_size_measurement', ['bitmask', 'coo'])
    .parametrize('expected_type', [SparseMaskChannel])
    def test_sparse_mask_stats(self, sparsity_method: str, compressed_size_measurement: str, expected_type: Type) -> None:
        # Reported byte counts match the analytic size of the compressed form.
        config = SparseMaskChannelConfig(proportion_of_zero_weights=0.6, report_communication_metrics=True, sparsity_method=sparsity_method, compressed_size_measurement=compressed_size_measurement)
        channel = instantiate(config)
        two_fc = utils.TwoFC()
        upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
        message = Message(upload_model)
        message = channel.client_to_server(message)
        stats = channel.stats_collector.get_channel_stats()
        client_to_server_bytes = stats[ChannelDirection.CLIENT_TO_SERVER].mean()
        n_weights = sum([p.numel() for p in two_fc.parameters() if (p.ndim == 2)])
        n_biases = sum([p.numel() for p in two_fc.parameters() if (p.ndim == 1)])
        non_zero_weights = (n_weights - int((n_weights * channel.cfg.proportion_of_zero_weights)))
        non_zero_biases = (n_biases - int((n_biases * channel.cfg.proportion_of_zero_weights)))
        n_dim_weights = 2
        n_dim_biases = 1
        # COO stores one int64 index per dimension per non-zero entry;
        # bitmask stores one bit per entry. Values are always fp32.
        true_size_bytes_weights = ((((non_zero_weights * SparseMaskChannel.BYTES_PER_INT64) * n_dim_weights) if (compressed_size_measurement == 'coo') else (SparseMaskChannel.BYTES_PER_BIT * n_weights)) + (non_zero_weights * SparseMaskChannel.BYTES_PER_FP32))
        true_size_bytes_biases = ((((non_zero_biases * SparseMaskChannel.BYTES_PER_INT64) * n_dim_biases) if (compressed_size_measurement == 'coo') else (SparseMaskChannel.BYTES_PER_BIT * n_biases)) + (non_zero_biases * SparseMaskChannel.BYTES_PER_FP32))
        true_size_bytes = (true_size_bytes_weights + true_size_bytes_biases)
        assertEqual(client_to_server_bytes, true_size_bytes)

    .parametrize('sparsity_method', ['topk', 'random'])
    def test_sparsity_after_reception(self, sparsity_method: str) -> None:
        # After decoding on the server, the model still shows ~60% sparsity.
        config = SparseMaskChannelConfig(proportion_of_zero_weights=0.6, report_communication_metrics=True, sparsity_method=sparsity_method)
        channel = instantiate(config)
        two_fc = utils.TwoFC()
        upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
        message = Message(upload_model)
        message = channel.client_to_server(message)
        state_dict = message.model.fl_get_module().state_dict()
        assertAlmostEqual(utils.calc_model_sparsity(state_dict), 0.6, delta=0.05)
def get_pubkey(username, projectname, log, sign_domain, outfile=None):
    """Return the GPG public key for a user/project pair via the sign binary.

    Optionally writes the key to *outfile*. Raises CoprSignNoKeyError when no
    key exists in the keyring, CoprSignError on any other sign failure.
    """
    usermail = create_gpg_email(username, projectname, sign_domain)
    cmd = [SIGN_BINARY, '-u', usermail, '-p']
    returncode, stdout, stderr = call_sign_bin(cmd, log)
    if returncode != 0:
        # Distinguish "key missing" from other sign-binary failures.
        if 'unknown key:' in stderr:
            raise CoprSignNoKeyError(
                'There are no gpg keys for user {} in keyring'.format(username),
                return_code=returncode, cmd=cmd, stdout=stdout, stderr=stderr)
        raise CoprSignError(
            msg='Failed to get user pubkey\nsign stdout: {}\n sign stderr: {}\n'.format(stdout, stderr),
            return_code=returncode, cmd=cmd, stdout=stdout, stderr=stderr)
    if outfile:
        with open(outfile, 'w') as handle:
            handle.write(stdout)
    return stdout
def unsign(wheelfile):
    """Remove the trailing signature record (RECORD.jws) from a wheel archive."""
    warn_signatures()
    archive = VerifyingZipFile(wheelfile, 'a')
    entries = archive.infolist()
    # The signature, if present, must be the very last archive member.
    is_signed = bool(len(entries)) and entries[-1].filename.endswith('/RECORD.jws')
    if not is_signed:
        raise WheelError('The wheel is not signed (RECORD.jws not found at end of the archive).')
    archive.pop()
    archive.close()
class OptionSeriesLollipopSonificationTracksPointgrouping(Options):
    """Point-grouping options for lollipop-series sonification tracks.

    NOTE(review): duplicate def names suggest stripped @property / @x.setter
    decorators — confirm against the original source.
    """

    def algorithm(self):
        # Getter: grouping algorithm (default 'minmax').
        return self._config_get('minmax')
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    def enabled(self):
        # Getter: whether point grouping is active (default True).
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def groupTimespan(self):
        # Getter: timespan per group in milliseconds (default 15).
        return self._config_get(15)
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Getter: point property used for grouping (default 'y').
        return self._config_get('y')
    def prop(self, text: str):
        self._config(text, js_type=False)
def test_main(modules, serial_override=None):
    """Entry point: parse CLI options and run the suite for *modules*.

    serial_override, when given, forces serial execution regardless of the
    parsed command-line value.
    """
    print(('testing module %s' % modules))
    (requested_test_classes, regex_test_classes, clean, dumpfail, debug, keep_logs, nocheck, serial, repeat, excluded_test_classes, report_json_filename, port_order, start_port, loglevel, profile) = parse_args()
    if (serial_override is not None):
        print('overriding serial to ', serial_override)
        serial = serial_override
    setLogLevel(loglevel)
    if clean:
        # --clean short-circuits: tidy up leftovers from earlier runs and exit.
        print('Cleaning up test interfaces, processes and openvswitch configuration from previous test runs')
        Cleanup.cleanup()
        sys.exit(0)
    if nocheck:
        print('Skipping dependency checks')
    elif (not check_dependencies()):
        print('dependency check failed. check required library/binary list in header of this script')
        sys.exit((- 1))
    print('port order: -o', ','.join((str(i) for i in port_order)))
    print(('start port: --port %s' % start_port))
    hw_config = import_hw_config()
    if profile:
        # Wall-clock profiling wrapped around the whole test run.
        pr = cProfile.Profile(time.time)
        pr.enable()
    run_tests(modules, hw_config, requested_test_classes, regex_test_classes, dumpfail, debug, keep_logs, serial, repeat, excluded_test_classes, report_json_filename, port_order, start_port)
    if profile:
        pr.disable()
        ps = pstats.Stats(pr).sort_stats('cumulative')
        ps.print_stats()
class UntypedActivityStub():
    """Stub for invoking activities by name without a typed interface.

    NOTE(review): annotated class-level defaults suggest a stripped
    @dataclass decorator — confirm against the original source.
    """
    # Decision context driving scheduling; set by the framework.
    _decision_context: object = None
    # Retry policy applied to scheduled activities.
    _retry_parameters: RetryParameters = None
    # Per-activity options (timeouts, task list, ...).
    _activity_options: ActivityOptions = None

    async def execute(self, activity_name: str, *args):
        # Schedule the activity and block until its result is available.
        f = (await self.execute_async(activity_name, *args))
        return (await f.wait_for_result())

    async def execute_async(self, activity_name: str, *args):
        # Schedule the activity and return a future-like handle.
        from .async_activity import Async
        execute_parameters = ExecuteActivityParameters()
        execute_parameters.activity_type = ActivityType()
        execute_parameters.activity_type.name = activity_name
        return Async.call(self, execute_parameters, args)
class PurgatoryMode(BaseMode):
    """Paused/unfocused game mode: dims the screen until the user clicks."""

    name = Mode.purgatory
    keymap = {Action.regain_control: False}

    def __init__(self):
        super().__init__()
        # Tri-state: None until the first purgatory_task tick observes focus.
        self.is_window_active = None
        self.dim_overlay = GenericMenu(title='Click to continue...', frame_color=(0, 0, 0, 0.4), title_pos=(0, 0, (- 0.2)))
        self.dim_overlay.hide()

    def enter(self):
        mouse.mode(MouseMode.ABSOLUTE)
        self.dim_overlay.show()
        # mouse1 release arms the action; press disarms it, so a full
        # click cycle is required to leave purgatory.
        self.register_keymap_event('mouse1-up', Action.regain_control, True)
        self.register_keymap_event('mouse1-down', Action.regain_control, False)
        tasks.add(self.purgatory_task, 'purgatory_task')
        tasks.add(self.shared_task, 'shared_task')

    def exit(self):
        tasks.remove('shared_task')
        tasks.remove('purgatory_task')
        # Restore the normal frame rate in case it was throttled while inactive.
        Global.clock.setFrameRate(ani.settings['graphics']['fps'])
        self.dim_overlay.hide()

    def purgatory_task(self, task):
        if self.keymap[Action.regain_control]:
            Global.mode_mgr.change_mode(Global.mode_mgr.last_mode)
        # Throttle the frame rate while the window is in the background.
        is_window_active = Global.base.win.get_properties().foreground
        if (is_window_active is not self.is_window_active):
            if is_window_active:
                Global.clock.setFrameRate(ani.settings['graphics']['fps'])
            else:
                Global.clock.setFrameRate(ani.settings['graphics']['fps_inactive'])
            self.is_window_active = is_window_active
        return task.cont
class InetZoneIndex(TextualConvention, Unsigned32):
    """Auto-generated SMI textual convention (INET-ADDRESS-MIB zone index)."""
    status = 'current'
    displayHint = 'd'
    # The long description is only attached when MIB texts are loaded,
    # keeping memory usage down otherwise (standard pysnmp codegen pattern).
    if mibBuilder.loadTexts:
        description = 'A zone index identifies an instance of a zone of a specific scope. The zone\nindex MUST disambiguate identical address values. For link-local addresses, the\nzone index will typically be the interface index (ifIndex as defined in the IF-\nMIB) of the interface on which the address is configured. The zone index may\ncontain the special value 0, which refers to the default zone. The default zone\nmay be used in cases where the valid zone index is not known (e.g., when a\nmanagement application has to write a link-local IPv6 address without knowing\nthe interface index value). The default zone SHOULD NOT be used as an easy way\nout in cases where the zone index for a non-global IPv6 address is known.\n'
def configure_fn(apikey: str) -> None:
    """Validate an API key against the server and store it in the config file.

    Prompts for a key when *apikey* is falsy, migrates legacy auth.json
    (email/password) credentials when present, and merges the key into the
    existing TOML config instead of overwriting other settings.
    """
    def auth(req):
        # requests auth hook: send the API key in the dedicated header.
        req.headers[HEADER_APIKEY] = apikey
        return req
    if os.path.exists(CREDENTIAL_FILE):
        with open(CREDENTIAL_FILE, encoding='utf-8') as fp:
            auth_json = json.load(fp)
            email = auth_json['email']
            password = auth_json['password']
            if email and password:
                # Legacy email/password credentials found: migrate and stop.
                if migrate():
                    click.echo('Migrate successfully. auth.json is renamed to auth.json.bak.')
                return
    if not apikey:
        current_apikey = get_description()
        message = f'Current API key: [{current_apikey}]\n' if current_apikey else ''
        apikey = click.prompt(f'{message}Please enter your api key', type=str)
    try:
        resp = requests.get(f'{Env.current.web_api_endpoint}/apikey', auth=auth, verify=Env.current.ssl_verify)
    except (requests.exceptions.SSLError, ssl.SSLError):
        # Retry without certificate verification when the TLS handshake fails.
        resp = requests.get(f'{Env.current.web_api_endpoint}/apikey', auth=auth, verify=False)
    if resp.status_code == 200:
        click.echo('Configured successfully.')
        # BUG FIX: the previous code opened CONFIG_FILE with mode 'w+' and then
        # called read() — 'w+' truncates the file on open, so read() returned ''
        # and every pre-existing config setting was silently discarded.
        # Read the current config first, then rewrite the whole file.
        toml_config = {}
        if os.path.exists(CONFIG_FILE):
            with open(CONFIG_FILE, encoding='utf-8') as config_file:
                toml_config = toml.loads(config_file.read())
        toml_config.update({KEY_APIKEY: apikey})
        with open(CONFIG_FILE, 'w', encoding='utf-8') as config_file:
            config_file.write(toml.dumps(toml_config))
    else:
        click.echo('API key is invalid.')
_member_required
def remove_document_metadata(request, uuid, metadata_uuid=None):
    """Django view: delete one metadata link from a document, then redirect.

    Looks the link up among binary metadata first, falling back to text
    metadata (404 if neither exists). Success/failure is reported via the
    messages framework.
    """
    uuid = UUID(uuid)
    doc = get_object_or_404(Document, uuid=uuid)
    # NOTE(review): indentation was reconstructed — the deletion is assumed
    # to happen only when metadata_uuid is provided; confirm against the
    # original source / URL configuration.
    if (metadata_uuid is not None):
        try:
            has = DocumentHasBinaryMetadata.objects.all().get(document=doc, metadata=metadata_uuid)
        except DocumentHasBinaryMetadata.DoesNotExist:
            has = get_object_or_404(DocumentHasTextMetadata, document=doc, metadata=metadata_uuid)
        # Capture the metadata object before the link row is deleted so the
        # flash message can still name it.
        m = has.metadata
        try:
            has.delete()
            doc.set_last_modified()
            messages.add_message(request, messages.SUCCESS, f'Removed metadata "{m.name}".')
        except Exception as exc:
            messages.add_message(request, messages.ERROR, f'Metadata "{m.name}" could not be removed: {exc}.')
    return redirect(doc)
def test_partitioned_analyses_raises_exception_at_init_if_partitions_is_none_and_value_gt_than_255():
    """update() rejects intermediate data wider than a byte when no partitions were given."""
    distinguisher = DumbPartDistinguisher()
    traces = np.random.randint(0, 255, (500, 200), dtype='int16')
    # Values up to 3000 exceed the 255 limit and must trigger the error.
    data = np.random.randint(0, 3000, (500, 16), dtype='uint16')
    with pytest.raises(ValueError, match='max value for intermediate data is greater than 255'):
        distinguisher.update(traces=traces, data=data)
class SearchFilterFkTests(TestCase):
    """SearchFilter behaviour when searching across a ForeignKey relation."""

    def test_must_call_distinct(self):
        # FK (non-m2m) traversals never require DISTINCT, with or without
        # a lookup prefix and regardless of field count.
        filter_ = filters.SearchFilter()
        prefixes = ([''] + list(filter_.lookup_prefixes))
        for prefix in prefixes:
            assert (not filter_.must_call_distinct(SearchFilterModelFk._meta, [('%stitle' % prefix)]))
            assert (not filter_.must_call_distinct(SearchFilterModelFk._meta, [('%stitle' % prefix), ('%sattribute__label' % prefix)]))

    def test_must_call_distinct_restores_meta_for_each_field(self):
        # Checking a related field first must not corrupt the _meta used
        # for the following field in the same call.
        filter_ = filters.SearchFilter()
        prefixes = ([''] + list(filter_.lookup_prefixes))
        for prefix in prefixes:
            assert (not filter_.must_call_distinct(SearchFilterModelFk._meta, [('%sattribute__label' % prefix), ('%stitle' % prefix)]))

    def test_custom_lookup_to_related_model(self):
        # Default lookup is icontains; an explicit lookup suffix is preserved.
        filter_ = filters.SearchFilter()
        assert ('attribute__label__icontains' == filter_.construct_search('attribute__label', SearchFilterModelFk._meta))
        assert ('attribute__label__iendswith' == filter_.construct_search('attribute__label__iendswith', SearchFilterModelFk._meta))
def test_receipts_request_with_extra_unrequested_receipts():
    """The validator rejects a response padded with receipts that were never requested."""
    requested = mk_headers(1, 3, 2, 5, 4)
    headers, receipts, roots_and_data = zip(*requested)
    receipts_bundle = tuple(zip(receipts, roots_and_data))
    # Build receipts for headers that were NOT part of the request.
    unrequested = mk_headers(4, 3, 8)
    _, extra_receipts, extra_roots_and_data = zip(*unrequested)
    extra_bundle = tuple(zip(extra_receipts, extra_roots_and_data))
    validator = ReceiptsValidator(headers)
    with pytest.raises(ValidationError):
        validator.validate_result(receipts_bundle + extra_bundle)
class OptionSeriesBarDragdropDraghandle(Options):
    """Drag-handle appearance options for bar-series drag/drop.

    NOTE(review): duplicate def names suggest stripped @property / @x.setter
    decorators — confirm against the original source.
    """

    def className(self):
        # Getter: CSS class of the handle (default 'highcharts-drag-handle').
        return self._config_get('highcharts-drag-handle')
    def className(self, text: str):
        self._config(text, js_type=False)

    def color(self):
        # Getter: fill color of the handle (default '#fff').
        return self._config_get('#fff')
    def color(self, text: str):
        self._config(text, js_type=False)

    def cursor(self):
        # Getter: mouse cursor while hovering (default None = automatic).
        return self._config_get(None)
    def cursor(self, text: str):
        self._config(text, js_type=False)

    def lineColor(self):
        # Getter: stroke color (default 'rgba(0, 0, 0, 0.6)').
        return self._config_get('rgba(0, 0, 0, 0.6)')
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    def lineWidth(self):
        # Getter: stroke width in pixels (default 1).
        return self._config_get(1)
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    def pathFormatter(self):
        # Getter: custom SVG-path formatter callback (default None).
        return self._config_get(None)
    def pathFormatter(self, value: Any):
        self._config(value, js_type=False)

    def zIndex(self):
        # Getter: stacking order of the handle (default 901).
        return self._config_get(901)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
class HomeConnectStatusSensor(SensorEntity):
    """Sensor exposing the global Home Connect API status.

    NOTE(review): the accessor methods below look like stripped @property
    decorators (and self.unique_id in __init__ reads as a property) —
    confirm against the original source.
    """
    should_poll = True
    _attr_has_entity_name = True

    def __init__(self, homeconnect: HomeConnect) -> None:
        self._homeconnect = homeconnect
        self.entity_id = f'home_connect.{self.unique_id}'

    def device_info(self):
        return HOME_CONNECT_DEVICE

    def unique_id(self) -> str:
        return 'homeconnect_status'

    def translation_key(self) -> str:
        return 'homeconnect_status'

    def available(self) -> bool:
        # This diagnostic sensor is always reportable.
        return True

    def native_value(self):
        return GlobalStatus.get_status().name

    def extra_state_attributes(self) -> (Mapping[(str, Any)] | None):
        return {'blocked_until': GlobalStatus.get_blocked_until(), 'blocked_for': GlobalStatus.get_block_time_str()}
def lazy_import():
    """Populate module globals with model classes, deferring the imports."""
    from fastly.model.read_only_customer_id import ReadOnlyCustomerId
    from fastly.model.read_only_id import ReadOnlyId
    from fastly.model.read_only_user_id import ReadOnlyUserId
    # Expose the freshly imported classes at module scope.
    globals().update(
        ReadOnlyCustomerId=ReadOnlyCustomerId,
        ReadOnlyId=ReadOnlyId,
        ReadOnlyUserId=ReadOnlyUserId,
    )
class MH_Trace(command_line.MISS_HIT_Back_End):
    """MISS_HIT back-end that collects tracing items and emits LOBSTER files."""

    def __init__(self, options):
        super().__init__('MH Trace')
        self.imp_items = []   # implementation trace items, accumulated per WP
        self.act_items = []   # activity (test) trace items, accumulated per WP
        self.options = options

    def process_wp(cls, wp):
        # NOTE(review): 'cls' first argument suggests a stripped @classmethod
        # decorator — confirm against the original source.
        lexer = MATLAB_Lexer(wp.cfg.language, wp.mh, wp.get_content(), wp.filename, wp.blockname)
        if (not wp.cfg.pragmas):
            lexer.process_pragmas = False
        if (len(lexer.text.strip()) == 0):
            # Empty file: nothing to trace.
            return MH_Trace_Result(wp)
        try:
            parser = MATLAB_Parser(wp.mh, lexer, wp.cfg)
            n_cu = parser.parse_file()
            n_ep = get_enclosing_ep(wp.filename)
        except Error:
            # Parse errors were already reported via wp.mh; return empty result.
            return MH_Trace_Result(wp)
        visitor = Function_Visitor(wp.in_test_dir, wp.mh, n_cu, n_ep, wp.blockname)
        n_cu.visit(None, visitor, 'Root')
        return MH_Trace_Result(wp, visitor.imp_items, visitor.act_items)

    def process_simulink_wp(cls, wp):
        # NOTE(review): 'cls' first argument suggests a stripped @classmethod.
        assert isinstance(wp, work_package.SIMULINK_File_WP)
        if (wp.n_content is None):
            return MH_Trace_Result(wp)
        else:
            n_ep = get_enclosing_ep(wp.filename)
            walker = Simulink_Walker(in_test_dir=wp.in_test_dir, mh=wp.mh, n_root=wp.n_content, ep=n_ep, inherit=wp.options.untagged_blocks_inherit_tags)
            return MH_Trace_Result(wp, walker.imp_items, walker.act_items)

    def process_result(self, result):
        # Merge one work package's items into the global lists.
        if result.imp_items:
            self.imp_items += result.imp_items
        if result.act_items:
            self.act_items += result.act_items

    def post_process(self):
        # Optionally drop untagged Simulink items, then write both LOBSTER
        # trace files (implementation and activity).
        if self.options.only_tagged_blocks:
            self.imp_items = list(filter((lambda x: ((x['language'] != 'Simulink') or x['refs'])), self.imp_items))
            self.act_items = list(filter((lambda x: ((x['framework'] != 'Simulink') or x['refs'])), self.act_items))
        with open(self.options.out_imp, 'w', encoding='UTF-8') as fd:
            data = {'data': self.imp_items, 'generator': 'MH Trace', 'schema': 'lobster-imp-trace', 'version': 3}
            json.dump(data, fd, indent=2, sort_keys=True)
            fd.write('\n')
        with open(self.options.out_act, 'w', encoding='UTF-8') as fd:
            data = {'data': self.act_items, 'generator': 'MH Trace', 'schema': 'lobster-act-trace', 'version': 3}
            json.dump(data, fd, indent=2, sort_keys=True)
            fd.write('\n')
class DjangoStreamTest(IsolatedAsyncioTestCase):
    """Async streaming tests against the Django model-backed event storage."""

    def setUpClass(cls):
        # NOTE(review): 'cls' first argument suggests a stripped @classmethod.
        super().setUpClass()
        cls.storage = DjangoModelStorage()
        pass

    # NOTE(review): the bare string below looks like a stripped
    # @patch('django_eventstream.eventstream.get_storage') decorator — confirm.
    ('django_eventstream.eventstream.get_storage')
    async def test_stream_with_last_event_id_does_not_loop_forever(self, mock_get_storage):
        # Stream from a known last-event-id, stop after 2s, and verify the
        # storage was queried once per page, never repeatedly for one page.
        (events_consumer, request) = (await self.__initialise_test(mock_get_storage))
        with patch.object(self.storage, 'get_events', wraps=self.storage.get_events) as wrapped_storage:
            promise = asyncio.create_task(events_consumer.stream(request))
            (await asyncio.sleep(2))
            events_consumer.is_streaming = False
            (await promise)
            self.__assert_all_events_are_retrieved_only_once()

    def __assert_all_events_are_retrieved_only_once(self):
        # One call for the first page, one for the page after EVENTS_LIMIT.
        self.storage.get_events.assert_any_call(CHANNEL_NAME, INITIAL_EVENT, limit=(EVENTS_LIMIT + 1))
        self.storage.get_events.assert_any_call(CHANNEL_NAME, EVENTS_LIMIT, limit=(EVENTS_LIMIT + 1))

    async def __initialise_test(self, mock_get_storage):
        mock_get_storage.return_value = self.storage
        events_consumer = self.__create_events_consumer()
        request = self.__create_event_request()
        (await self.__populate_db_with_events())
        return (events_consumer, request)

    def __create_events_consumer(self):
        # Consumer wired with a listener whose wait never blocks the test.
        mock_listener = Listener()
        mock_listener.aevent.wait = mock_wait
        events_consumer = EventsConsumer()
        events_consumer.listener = mock_listener
        events_consumer.is_streaming = True
        events_consumer.base_send = mock_send
        return events_consumer

    def __create_event_request(self):
        request = EventRequest()
        request.is_next = False
        request.is_recover = False
        request.channels = [CHANNEL_NAME]
        request.channel_last_ids = {CHANNEL_NAME: INITIAL_EVENT}
        return request

    # NOTE(review): bare '_sync_to_async' looks like a stripped decorator.
    _sync_to_async
    def __populate_db_with_events(self):
        # More events than one page so pagination is actually exercised.
        for i in range((EVENTS_LIMIT + EVENTS_OVER_LIMIT)):
            self.storage.append_event(CHANNEL_NAME, 'message', 'dummy')
class AsyncSearchClient(NamespacedClient):
    """Generated client for the Elasticsearch async-search APIs.

    NOTE(review): the bare '_rewrite_parameters()' lines below look like
    stripped '@_rewrite_parameters(...)' decorators — confirm against the
    generated original.
    """

    _rewrite_parameters()
    async def delete(self, *, id: str, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[(str, t.Sequence[str])]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:
        """Delete a stored async search (DELETE /_async_search/{id})."""
        if (id in SKIP_IN_PATH):
            raise ValueError("Empty value passed for parameter 'id'")
        __path = f'/_async_search/{_quote(id)}'
        __query: t.Dict[(str, t.Any)] = {}
        if (error_trace is not None):
            __query['error_trace'] = error_trace
        if (filter_path is not None):
            __query['filter_path'] = filter_path
        if (human is not None):
            __query['human'] = human
        if (pretty is not None):
            __query['pretty'] = pretty
        __headers = {'accept': 'application/json'}
        return (await self.perform_request('DELETE', __path, params=__query, headers=__headers))

    _rewrite_parameters()
    async def get(self, *, id: str, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[(str, t.Sequence[str])]]=None, human: t.Optional[bool]=None, keep_alive: t.Optional[t.Union[('t.Literal[-1]', 't.Literal[0]', str)]]=None, pretty: t.Optional[bool]=None, typed_keys: t.Optional[bool]=None, wait_for_completion_timeout: t.Optional[t.Union[('t.Literal[-1]', 't.Literal[0]', str)]]=None) -> ObjectApiResponse[t.Any]:
        """Retrieve results of an async search (GET /_async_search/{id})."""
        if (id in SKIP_IN_PATH):
            raise ValueError("Empty value passed for parameter 'id'")
        __path = f'/_async_search/{_quote(id)}'
        __query: t.Dict[(str, t.Any)] = {}
        if (error_trace is not None):
            __query['error_trace'] = error_trace
        if (filter_path is not None):
            __query['filter_path'] = filter_path
        if (human is not None):
            __query['human'] = human
        if (keep_alive is not None):
            __query['keep_alive'] = keep_alive
        if (pretty is not None):
            __query['pretty'] = pretty
        if (typed_keys is not None):
            __query['typed_keys'] = typed_keys
        if (wait_for_completion_timeout is not None):
            __query['wait_for_completion_timeout'] = wait_for_completion_timeout
        __headers = {'accept': 'application/json'}
        return (await self.perform_request('GET', __path, params=__query, headers=__headers))

    _rewrite_parameters()
    async def status(self, *, id: str, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[(str, t.Sequence[str])]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:
        """Get the status of an async search (GET /_async_search/status/{id})."""
        if (id in SKIP_IN_PATH):
            raise ValueError("Empty value passed for parameter 'id'")
        __path = f'/_async_search/status/{_quote(id)}'
        __query: t.Dict[(str, t.Any)] = {}
        if (error_trace is not None):
            __query['error_trace'] = error_trace
        if (filter_path is not None):
            __query['filter_path'] = filter_path
        if (human is not None):
            __query['human'] = human
        if (pretty is not None):
            __query['pretty'] = pretty
        __headers = {'accept': 'application/json'}
        return (await self.perform_request('GET', __path, params=__query, headers=__headers))

    _rewrite_parameters(body_fields=('aggregations', 'aggs', 'collapse', 'docvalue_fields', 'explain', 'ext', 'fields', 'from_', 'highlight', 'indices_boost', 'knn', 'min_score', 'pit', 'post_filter', 'profile', 'query', 'rescore', 'runtime_mappings', 'script_fields', 'search_after', 'seq_no_primary_term', 'size', 'slice', 'sort', 'source', 'stats', 'stored_fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'track_total_hits', 'version'), parameter_aliases={'_source': 'source', '_source_excludes': 'source_excludes', '_source_includes': 'source_includes', 'from': 'from_'})
    async def submit(self, *, index: t.Optional[t.Union[(str, t.Sequence[str])]]=None, aggregations: t.Optional[t.Mapping[(str, t.Mapping[(str, t.Any)])]]=None, aggs: t.Optional[t.Mapping[(str, t.Mapping[(str, t.Any)])]]=None, allow_no_indices: t.Optional[bool]=None, allow_partial_search_results: t.Optional[bool]=None, analyze_wildcard: t.Optional[bool]=None, analyzer: t.Optional[str]=None, batched_reduce_size: t.Optional[int]=None, ccs_minimize_roundtrips: t.Optional[bool]=None, collapse: t.Optional[t.Mapping[(str, t.Any)]]=None, default_operator: t.Optional[t.Union[("t.Literal['and', 'or']", str)]]=None, df: t.Optional[str]=None, docvalue_fields: t.Optional[t.Sequence[t.Mapping[(str, t.Any)]]]=None, error_trace: t.Optional[bool]=None, expand_wildcards: t.Optional[t.Union[(t.Sequence[t.Union[("t.Literal['all', 'closed', 'hidden', 'none', 'open']", str)]], t.Union[("t.Literal['all', 'closed', 'hidden', 'none', 'open']", str)])]]=None, explain: t.Optional[bool]=None, ext: t.Optional[t.Mapping[(str, t.Any)]]=None, fields: t.Optional[t.Sequence[t.Mapping[(str, t.Any)]]]=None, filter_path: t.Optional[t.Union[(str, t.Sequence[str])]]=None, from_: t.Optional[int]=None, highlight: t.Optional[t.Mapping[(str, t.Any)]]=None, human: t.Optional[bool]=None, ignore_throttled: t.Optional[bool]=None, ignore_unavailable: t.Optional[bool]=None, indices_boost: t.Optional[t.Sequence[t.Mapping[(str, float)]]]=None, keep_alive: t.Optional[t.Union[('t.Literal[-1]', 't.Literal[0]', str)]]=None, keep_on_completion: t.Optional[bool]=None, knn: t.Optional[t.Union[(t.Mapping[(str, t.Any)], t.Sequence[t.Mapping[(str, t.Any)]])]]=None, lenient: t.Optional[bool]=None, max_concurrent_shard_requests: t.Optional[int]=None, min_compatible_shard_node: t.Optional[str]=None, min_score: t.Optional[float]=None, pit: t.Optional[t.Mapping[(str, t.Any)]]=None, post_filter: t.Optional[t.Mapping[(str, t.Any)]]=None, pre_filter_shard_size: t.Optional[int]=None, preference: t.Optional[str]=None, pretty: 
        t.Optional[bool]=None, profile: t.Optional[bool]=None, q: t.Optional[str]=None, query: t.Optional[t.Mapping[(str, t.Any)]]=None, request_cache: t.Optional[bool]=None, rescore: t.Optional[t.Union[(t.Mapping[(str, t.Any)], t.Sequence[t.Mapping[(str, t.Any)]])]]=None, rest_total_hits_as_int: t.Optional[bool]=None, routing: t.Optional[str]=None, runtime_mappings: t.Optional[t.Mapping[(str, t.Mapping[(str, t.Any)])]]=None, script_fields: t.Optional[t.Mapping[(str, t.Mapping[(str, t.Any)])]]=None, scroll: t.Optional[t.Union[('t.Literal[-1]', 't.Literal[0]', str)]]=None, search_after: t.Optional[t.Sequence[t.Union[(None, bool, float, int, str, t.Any)]]]=None, search_type: t.Optional[t.Union[("t.Literal['dfs_query_then_fetch', 'query_then_fetch']", str)]]=None, seq_no_primary_term: t.Optional[bool]=None, size: t.Optional[int]=None, slice: t.Optional[t.Mapping[(str, t.Any)]]=None, sort: t.Optional[t.Union[(t.Sequence[t.Union[(str, t.Mapping[(str, t.Any)])]], t.Union[(str, t.Mapping[(str, t.Any)])])]]=None, source: t.Optional[t.Union[(bool, t.Mapping[(str, t.Any)])]]=None, source_excludes: t.Optional[t.Union[(str, t.Sequence[str])]]=None, source_includes: t.Optional[t.Union[(str, t.Sequence[str])]]=None, stats: t.Optional[t.Sequence[str]]=None, stored_fields: t.Optional[t.Union[(str, t.Sequence[str])]]=None, suggest: t.Optional[t.Mapping[(str, t.Any)]]=None, suggest_field: t.Optional[str]=None, suggest_mode: t.Optional[t.Union[("t.Literal['always', 'missing', 'popular']", str)]]=None, suggest_size: t.Optional[int]=None, suggest_text: t.Optional[str]=None, terminate_after: t.Optional[int]=None, timeout: t.Optional[str]=None, track_scores: t.Optional[bool]=None, track_total_hits: t.Optional[t.Union[(bool, int)]]=None, typed_keys: t.Optional[bool]=None, version: t.Optional[bool]=None, wait_for_completion_timeout: t.Optional[t.Union[('t.Literal[-1]', 't.Literal[0]', str)]]=None, body: t.Optional[t.Dict[(str, t.Any)]]=None) -> ObjectApiResponse[t.Any]:
        """Run a search asynchronously (POST [/{index}]/_async_search).

        Query-string parameters go into __query; search-body parameters go
        into __body (only when no explicit *body* was supplied).
        """
        if (index not in SKIP_IN_PATH):
            __path = f'/{_quote(index)}/_async_search'
        else:
            __path = '/_async_search'
        __query: t.Dict[(str, t.Any)] = {}
        __body: t.Dict[(str, t.Any)] = (body if (body is not None) else {})
        # 'field:direction' sort strings belong in the query string, not the
        # request body; consume 'sort' here so it is not added to __body below.
        if ((sort is not None) and ((isinstance(sort, str) and (':' in sort)) or (isinstance(sort, (list, tuple)) and all((isinstance(_x, str) for _x in sort)) and any(((':' in _x) for _x in sort))))):
            __query['sort'] = sort
            sort = None
        if (allow_no_indices is not None):
            __query['allow_no_indices'] = allow_no_indices
        if (allow_partial_search_results is not None):
            __query['allow_partial_search_results'] = allow_partial_search_results
        if (analyze_wildcard is not None):
            __query['analyze_wildcard'] = analyze_wildcard
        if (analyzer is not None):
            __query['analyzer'] = analyzer
        if (batched_reduce_size is not None):
            __query['batched_reduce_size'] = batched_reduce_size
        if (ccs_minimize_roundtrips is not None):
            __query['ccs_minimize_roundtrips'] = ccs_minimize_roundtrips
        if (default_operator is not None):
            __query['default_operator'] = default_operator
        if (df is not None):
            __query['df'] = df
        if (error_trace is not None):
            __query['error_trace'] = error_trace
        if (expand_wildcards is not None):
            __query['expand_wildcards'] = expand_wildcards
        if (filter_path is not None):
            __query['filter_path'] = filter_path
        if (human is not None):
            __query['human'] = human
        if (ignore_throttled is not None):
            __query['ignore_throttled'] = ignore_throttled
        if (ignore_unavailable is not None):
            __query['ignore_unavailable'] = ignore_unavailable
        if (keep_alive is not None):
            __query['keep_alive'] = keep_alive
        if (keep_on_completion is not None):
            __query['keep_on_completion'] = keep_on_completion
        if (lenient is not None):
            __query['lenient'] = lenient
        if (max_concurrent_shard_requests is not None):
            __query['max_concurrent_shard_requests'] = max_concurrent_shard_requests
        if (min_compatible_shard_node is not None):
            __query['min_compatible_shard_node'] = min_compatible_shard_node
        if (pre_filter_shard_size is not None):
            __query['pre_filter_shard_size'] = pre_filter_shard_size
        if (preference is not None):
            __query['preference'] = preference
        if (pretty is not None):
            __query['pretty'] = pretty
        if (q is not None):
            __query['q'] = q
        if (request_cache is not None):
            __query['request_cache'] = request_cache
        if (rest_total_hits_as_int is not None):
            __query['rest_total_hits_as_int'] = rest_total_hits_as_int
        if (routing is not None):
            __query['routing'] = routing
        if (scroll is not None):
            __query['scroll'] = scroll
        if (search_type is not None):
            __query['search_type'] = search_type
        if (source_excludes is not None):
            __query['_source_excludes'] = source_excludes
        if (source_includes is not None):
            __query['_source_includes'] = source_includes
        if (suggest_field is not None):
            __query['suggest_field'] = suggest_field
        if (suggest_mode is not None):
            __query['suggest_mode'] = suggest_mode
        if (suggest_size is not None):
            __query['suggest_size'] = suggest_size
        if (suggest_text is not None):
            __query['suggest_text'] = suggest_text
        if (typed_keys is not None):
            __query['typed_keys'] = typed_keys
        if (wait_for_completion_timeout is not None):
            __query['wait_for_completion_timeout'] = wait_for_completion_timeout
        # Body fields are only assembled when the caller did not pass an
        # explicit raw body.
        if (not __body):
            if (aggregations is not None):
                __body['aggregations'] = aggregations
            if (aggs is not None):
                __body['aggs'] = aggs
            if (collapse is not None):
                __body['collapse'] = collapse
            if (docvalue_fields is not None):
                __body['docvalue_fields'] = docvalue_fields
            if (explain is not None):
                __body['explain'] = explain
            if (ext is not None):
                __body['ext'] = ext
            if (fields is not None):
                __body['fields'] = fields
            if (from_ is not None):
                __body['from'] = from_
            if (highlight is not None):
                __body['highlight'] = highlight
            if (indices_boost is not None):
                __body['indices_boost'] = indices_boost
            if (knn is not None):
                __body['knn'] = knn
            if (min_score is not None):
                __body['min_score'] = min_score
            if (pit is not None):
                __body['pit'] = pit
            if (post_filter is not None):
                __body['post_filter'] = post_filter
            if (profile is not None):
                __body['profile'] = profile
            if (query is not None):
                __body['query'] = query
            if (rescore is not None):
                __body['rescore'] = rescore
            if (runtime_mappings is not None):
                __body['runtime_mappings'] = runtime_mappings
            if (script_fields is not None):
                __body['script_fields'] = script_fields
            if (search_after is not None):
                __body['search_after'] = search_after
            if (seq_no_primary_term is not None):
                __body['seq_no_primary_term'] = seq_no_primary_term
            if (size is not None):
                __body['size'] = size
            if (slice is not None):
                __body['slice'] = slice
            if (sort is not None):
                __body['sort'] = sort
            if (source is not None):
                __body['_source'] = source
            if (stats is not None):
                __body['stats'] = stats
            if (stored_fields is not None):
                __body['stored_fields'] = stored_fields
            if (suggest is not None):
                __body['suggest'] = suggest
            if (terminate_after is not None):
                __body['terminate_after'] = terminate_after
            if (timeout is not None):
                __body['timeout'] = timeout
            if (track_scores is not None):
                __body['track_scores'] = track_scores
            if (track_total_hits is not None):
                __body['track_total_hits'] = track_total_hits
            if (version is not None):
                __body['version'] = version
        if (not __body):
            # Send no body (and no content-type) when nothing was set.
            __body = None
        __headers = {'accept': 'application/json'}
        if (__body is not None):
            __headers['content-type'] = 'application/json'
        return (await self.perform_request('POST', __path, params=__query, headers=__headers, body=__body))
('/select-language', strict_slashes=False)
('/allure-docker-service/select-language', strict_slashes=False)
_required
def select_language_endpoint():
    """Render the language-selection page for the requested language code.

    Reads the ``code`` query parameter, validates it against the supported
    LANGUAGES, and returns the rendered language template. Any validation
    failure is reported as a 400 response with a JSON error payload.
    """
    try:
        requested_code = request.args.get('code')
        if requested_code is None:
            raise Exception("'code' query parameter is required")
        requested_code = requested_code.lower()
        if requested_code not in LANGUAGES:
            raise Exception("'code' not supported. Use values: {}".format(LANGUAGES))
        return render_template(LANGUAGE_TEMPLATE, languageCode=requested_code, css=GLOBAL_CSS)
    except Exception as error:
        # Surface the failure as a JSON error document with HTTP 400.
        response = jsonify({'meta_data': {'message': str(error)}})
        response.status_code = 400
        return response
class ExponentialSechSqDiskModel(FunctionModel2DScalarAuto):
    """Disk model: exponential radial falloff times a sech^2 vertical
    profile, optionally rotated by a position angle ``pa``.
    """
    _fcoordsys = 'cartesian'

    def f(self, inarr, A=1, l=2, h=1, pa=0):
        """Evaluate A * exp(-|s'/l|) * sech^2(z'/h) at the (s, z) points in
        ``inarr``, where (s', z') is (s, z) rotated by the position angle."""
        (s, z) = inarr
        if pa == 0:
            # Fast path: no rotation needed.
            sr, zr = s, z
        else:
            # Rotate the coordinate frame by the position angle.
            sinpa, cospa = np.sin(pa), np.cos(pa)
            sr = (cospa * s) + (sinpa * z)
            zr = ((- sinpa) * s) + (cospa * z)
        radial = A * np.exp(- np.abs(sr / l))
        vertical = np.cosh(zr / h) ** (-2)
        return radial * vertical

    def rangehint(self):
        """Suggested plot range: three scale lengths/heights per axis."""
        return (0, 3 * self.l, (-3) * self.h, 3 * self.h)
class bsn_gentable_stats_request(bsn_stats_request):
    """Big Switch Networks experimenter stats request for gentable
    statistics (OpenFlow wire version 6).

    NOTE(review): this reads like loxigen-generated code targeting
    Python 2 -- pack() appends a raw '\\x00' padding *str* and joins with
    ''; on Python 3 struct.pack returns bytes and the join would fail.
    Confirm the intended interpreter. ``unpack`` is conventionally a
    @staticmethod in such generated code -- the decorator may have been
    lost upstream.
    """
    version = 6  # OpenFlow wire protocol version
    type = 18  # message type: stats/multipart request
    stats_type = 65535  # OFPST_EXPERIMENTER
    experimenter = 6035143  # Big Switch Networks experimenter id
    subtype = 7  # gentable stats request subtype

    def __init__(self, xid=None, flags=None):
        """Create a request with optional transaction id and stats flags."""
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        return

    def pack(self):
        """Serialize the message header and body.

        The 16-bit length field at offset 2 is written as 0 first and
        back-patched once the total size is known.
        """
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder, patched below
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(('\x00' * 4))  # 4 bytes of padding
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(struct.pack('!L', self.subtype))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)  # back-patch total length
        return ''.join(packed)

    def unpack(reader):
        """Deserialize a message from ``reader``, asserting every fixed
        header field matches this message type."""
        obj = bsn_gentable_stats_request()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 18)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message's declared length
        # (4 header bytes already consumed).
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 65535)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)  # padding
        _experimenter = reader.read('!L')[0]
        assert (_experimenter == 6035143)
        _subtype = reader.read('!L')[0]
        assert (_subtype == 7)
        return obj

    def __eq__(self, other):
        """Equal when the concrete type, xid and flags all match
        (the remaining fields are class-level constants)."""
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        return True

    def pretty_print(self, q):
        """Write a human-readable rendering of the message to printer ``q``."""
        q.text('bsn_gentable_stats_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REQ_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
            q.breakable()
        q.text('}')
class TestDictItemObserverNotifications(unittest.TestCase):
    """Integration tests: dict-item observers fire change notifications
    and keep nested notifiers in sync as inner dicts are added/removed."""

    def test_notify_dict_change(self):
        """Updating an observed TraitDict calls the handler once with the
        added/removed items on the event."""
        instance = ClassWithDict(values=dict())
        graph = create_graph(create_observer(notify=True))
        handler = mock.Mock()
        call_add_or_remove_notifiers(object=instance.values, graph=graph, handler=handler)
        instance.values.update({'1': 1})
        # Exactly one call expected; unpack its single positional event arg.
        (((event,), _),) = handler.call_args_list
        self.assertEqual(event.added, {'1': 1})
        self.assertEqual(event.removed, {})

    def test_notify_custom_trait_dict_change(self):
        """A TraitDict subclass is observed the same way as a plain TraitDict."""
        instance = ClassWithDict(custom_trait_dict=CustomTraitDict())
        graph = create_graph(create_observer(notify=True))
        handler = mock.Mock()
        call_add_or_remove_notifiers(object=instance.custom_trait_dict, graph=graph, handler=handler)
        instance.custom_trait_dict.update({'1': 1})
        (((event,), _),) = handler.call_args_list
        self.assertEqual(event.added, {'1': 1})
        self.assertEqual(event.removed, {})

    def test_maintain_notifier_for_added(self):
        """An inner dict added after observation starts gets its own notifier:
        the outer change is silent (notify=False) but the inner one fires."""
        instance = ClassWithDict()
        graph = create_graph(create_observer(notify=False, optional=False), create_observer(notify=True, optional=False))
        handler = mock.Mock()
        call_add_or_remove_notifiers(object=instance.dict_of_dict, graph=graph, handler=handler)
        instance.dict_of_dict.update({'1': {'2': 2}})
        # Outer-level change is observed with notify=False: no call yet.
        self.assertEqual(handler.call_count, 0)
        del instance.dict_of_dict['1']['2']
        self.assertEqual(handler.call_count, 1)
        (((event,), _),) = handler.call_args_list
        self.assertEqual(event.added, {})
        self.assertEqual(event.removed, {'2': 2})

    def test_maintain_notifier_for_removed(self):
        """An inner dict loses its notifier once it is replaced in the outer
        dict: further mutations of the detached dict are silent."""
        instance = ClassWithDict(dict_of_dict={'1': {'2': 2}})
        graph = create_graph(create_observer(notify=False, optional=False), create_observer(notify=True, optional=False))
        handler = mock.Mock()
        call_add_or_remove_notifiers(object=instance.dict_of_dict, graph=graph, handler=handler)
        inner_dict = instance.dict_of_dict['1']
        inner_dict['3'] = 3
        # While still referenced from the outer dict, changes notify.
        self.assertEqual(handler.call_count, 1)
        (((event,), _),) = handler.call_args_list
        self.assertEqual(event.added, {'3': 3})
        self.assertEqual(event.removed, {})
        handler.reset_mock()
        # Replace the inner dict; the old one should be detached.
        instance.dict_of_dict['1'] = {}
        inner_dict['4'] = 4
        self.assertEqual(handler.call_count, 0)
def test_cicd_contains_pypi_secrets(cookies, tmp_path):
    """Baking with publish_to=pypi must wire the PyPI token into the release
    workflow and expose the build-and-publish Makefile target."""
    with run_within_dir(tmp_path):
        baked = cookies.bake(extra_context={'publish_to': 'pypi'})
        assert baked.exit_code == 0
        workflow_path = f'{baked.project_path}/.github/workflows/on-release-main.yml'
        assert file_contains_text(workflow_path, 'PYPI_TOKEN')
        assert file_contains_text(f'{baked.project_path}/Makefile', 'build-and-publish')
class Configuration():
    """Runtime configuration for the Tydom/MQTT bridge.

    Values are resolved in layers, later layers overriding earlier ones:
    process environment variables, the Hass.io add-on options file
    (/data/options.json) when present, and a Tydom password fetched from
    the Delta Dore cloud when cloud credentials are configured.

    NOTE: the ``name = type`` class attributes below only document the
    intended type of each setting; real values are assigned in __init__.
    """
    log_level = str
    mqtt_host = str
    mqtt_password = str
    mqtt_port = int
    mqtt_ssl = bool
    mqtt_user = str
    tydom_alarm_home_zone = int
    tydom_alarm_night_zone = int
    tydom_alarm_pin = str
    tydom_ip = str
    tydom_mac = str
    tydom_password = str
    thermostat_custom_presets = list

    def __init__(self):
        """Populate every setting from environment variables (with defaults)."""
        self.log_level = os.getenv(LOG_LEVEL, 'INFO').upper()
        self.mqtt_host = os.getenv(MQTT_HOST, 'localhost')
        self.mqtt_password = os.getenv(MQTT_PASSWORD, None)
        # Fix: environment variables are strings -- coerce numeric and
        # boolean settings so downstream code gets the declared types
        # (previously MQTT_PORT stayed a str and MQTT_SSL="false" was truthy).
        self.mqtt_port = int(os.getenv(MQTT_PORT, 1883))
        self.mqtt_ssl = Configuration._as_bool(os.getenv(MQTT_SSL, False))
        self.mqtt_user = os.getenv(MQTT_USER, None)
        self.tydom_alarm_home_zone = int(os.getenv(TYDOM_ALARM_HOME_ZONE, 1))
        self.tydom_alarm_night_zone = int(os.getenv(TYDOM_ALARM_NIGHT_ZONE, 2))
        self.tydom_alarm_pin = os.getenv(TYDOM_ALARM_PIN, None)
        self.tydom_ip = os.getenv(TYDOM_IP, 'mediation.tydom.com')
        self.tydom_mac = os.getenv(TYDOM_MAC, None)
        self.tydom_password = os.getenv(TYDOM_PASSWORD, None)
        self.deltadore_login = os.getenv(DELTADORE_LOGIN, None)
        self.deltadore_password = os.getenv(DELTADORE_PASSWORD, None)
        # Kept as the raw env value; format is parsed elsewhere
        # (presumably JSON -- confirm against consumers).
        self.thermostat_custom_presets = os.getenv(THERMOSTAT_CUSTOM_PRESETS, None)

    @staticmethod
    def _as_bool(value):
        """Interpret an env/JSON value as a boolean.

        Accepts real booleans as-is; otherwise '1'/'true'/'yes'/'on'
        (case-insensitive) map to True, anything else to False.
        """
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ('1', 'true', 'yes', 'on')

    @staticmethod
    def load():
        """Build, override and validate a Configuration.

        Exits the process (via validate()) when mandatory settings are missing.
        """
        configuration = Configuration()
        configuration.override_configuration_for_hassio()
        configuration.override_configuration_with_deltadore()
        configuration.validate()
        return configuration

    def override_configuration_for_hassio(self):
        """Override settings from /data/options.json when running under Hass.io.

        Missing file means we are not on Hass.io; a malformed file is logged
        and the environment-derived configuration is kept.
        """
        hassio_options_file_path = '/data/options.json'
        try:
            with open(hassio_options_file_path) as f:
                logger.info('Hassio environment detected: loading configuration from /data/options.json')
                try:
                    data = json.load(f)
                    logger.debug('Hassio configuration parsed (%s)', data)
                    # Only non-empty values override the environment.
                    if ((LOG_LEVEL in data) and (data[LOG_LEVEL] != '')):
                        self.log_level = data[LOG_LEVEL].upper()
                    if ((TYDOM_MAC in data) and (data[TYDOM_MAC] != '')):
                        self.tydom_mac = data[TYDOM_MAC]
                    if ((TYDOM_IP in data) and (data[TYDOM_IP] != '')):
                        self.tydom_ip = data[TYDOM_IP]
                    if ((TYDOM_PASSWORD in data) and (data[TYDOM_PASSWORD] != '')):
                        self.tydom_password = data[TYDOM_PASSWORD]
                    if ((DELTADORE_LOGIN in data) and (data[DELTADORE_LOGIN] != '')):
                        self.deltadore_login = data[DELTADORE_LOGIN]
                    if ((DELTADORE_PASSWORD in data) and (data[DELTADORE_PASSWORD] != '')):
                        self.deltadore_password = data[DELTADORE_PASSWORD]
                    if ((TYDOM_ALARM_PIN in data) and (data[TYDOM_ALARM_PIN] != '')):
                        self.tydom_alarm_pin = str(data[TYDOM_ALARM_PIN])
                    if ((TYDOM_ALARM_HOME_ZONE in data) and (data[TYDOM_ALARM_HOME_ZONE] != '')):
                        self.tydom_alarm_home_zone = int(data[TYDOM_ALARM_HOME_ZONE])
                    if ((TYDOM_ALARM_NIGHT_ZONE in data) and (data[TYDOM_ALARM_NIGHT_ZONE] != '')):
                        self.tydom_alarm_night_zone = int(data[TYDOM_ALARM_NIGHT_ZONE])
                    if ((MQTT_HOST in data) and (data[MQTT_HOST] != '')):
                        self.mqtt_host = data[MQTT_HOST]
                    if ((MQTT_USER in data) and (data[MQTT_USER] != '')):
                        self.mqtt_user = data[MQTT_USER]
                    if ((MQTT_PASSWORD in data) and (data[MQTT_PASSWORD] != '')):
                        self.mqtt_password = data[MQTT_PASSWORD]
                    if ((MQTT_PORT in data) and (data[MQTT_PORT] != '')):
                        self.mqtt_port = int(data[MQTT_PORT])
                    if ((MQTT_SSL in data) and (data[MQTT_SSL] != '')):
                        self.mqtt_ssl = Configuration._as_bool(data[MQTT_SSL])
                except Exception as e:
                    logger.error('Parsing error %s', e)
        except FileNotFoundError:
            logger.debug('Hassio environment not detected')

    def override_configuration_with_deltadore(self):
        """Fetch the Tydom password from the Delta Dore cloud when both
        cloud credentials are configured."""
        if ((self.deltadore_login is not None) and (self.deltadore_login != '') and (self.deltadore_password is not None) and (self.deltadore_password != '')):
            tydom_password = TydomClient.getTydomCredentials(self.deltadore_login, self.deltadore_password, self.tydom_mac)
            self.tydom_password = tydom_password

    def validate(self):
        """Log a secret-masked view of the configuration and exit(1) when a
        mandatory setting (Tydom MAC or password) is missing."""
        # Mask secrets on a copy so the real values stay intact.
        configuration_to_print = copy.copy(self)
        configuration_to_print.tydom_password = Configuration.mask_value(configuration_to_print.tydom_password)
        configuration_to_print.mqtt_password = Configuration.mask_value(configuration_to_print.mqtt_password)
        configuration_to_print.deltadore_password = Configuration.mask_value(configuration_to_print.deltadore_password)
        configuration_to_print.tydom_alarm_pin = Configuration.mask_value(configuration_to_print.tydom_alarm_pin)
        logger.info('Validating configuration (%s', configuration_to_print.to_json())
        if ((self.tydom_mac is None) or (self.tydom_mac == '')):
            logger.error('Tydom MAC address must be defined')
            sys.exit(1)
        if ((self.tydom_password is None) or (self.tydom_password == '')):
            logger.error('Tydom password must be defined')
            sys.exit(1)
        logger.info('The configuration is valid')

    def to_json(self):
        """Serialize the configuration attributes as pretty-printed JSON."""
        return json.dumps(self, default=(lambda o: o.__dict__), sort_keys=True, indent=4)

    @staticmethod
    def mask_value(value, nb=1, char='*'):
        """Mask a secret, keeping ``nb`` characters at each end.

        Values shorter than 2*nb are fully masked; None/empty become ''.
        """
        if ((value is None) or (value == '')):
            return ''
        if (len(value) < (2 * nb)):
            return (char * len(value))
        return f'{value[0:nb]}{(char * max(0, (len(value) - (nb * 2))))}{value[(len(value) - nb):len(value)]}'
class DisasterRecipientDownloadValidator(DownloadValidatorBase):
    """Validates download requests for disaster recipient data and derives
    the award category, output columns and query filter for the download."""
    name = 'disaster_recipient'

    def __init__(self, request_data: dict):
        """Validate ``request_data`` and finalize the download request.

        Extends the tinyshield models with the disaster-specific filters
        (``def_codes`` required; ``query`` and ``award_type_codes``
        optional), then buckets the requested award types into a single
        award_category label and chooses the columns accordingly.
        """
        super().__init__(request_data)
        # def_codes enum values are read from the database at construction time.
        self.tinyshield_models.extend([{'key': 'filters|def_codes', 'name': 'def_codes', 'type': 'array', 'array_type': 'enum', 'enum_values': sorted(DisasterEmergencyFundCode.objects.values_list('code', flat=True)), 'allow_nulls': False, 'optional': False}, {'key': 'filters|query', 'name': 'query', 'type': 'text', 'text_type': 'search', 'allow_nulls': False, 'optional': True}, {'key': 'filters|award_type_codes', 'name': 'award_type_codes', 'type': 'array', 'array_type': 'enum', 'enum_values': sorted(award_type_mapping.keys()), 'allow_nulls': False, 'optional': True}])
        self._json_request['filters'] = request_data.get('filters')
        self._json_request = self.get_validated_request()
        self._json_request['download_types'] = [self.name]
        # Default used when the requested codes span multiple type buckets.
        award_category = 'All-Awards'
        # No award_type_codes filter means "all award types".
        award_type_codes = set(self._json_request['filters'].get('award_type_codes', award_type_mapping.keys()))
        columns = ['recipient', 'award_obligations', 'award_outlays', 'number_of_awards']
        # Classify via subset tests: a category applies only when every
        # requested code belongs to that category's mapping.
        if (award_type_codes <= set(contract_type_mapping.keys())):
            award_category = 'Contracts'
        elif (award_type_codes <= set(idv_type_mapping.keys())):
            award_category = 'Contract-IDVs'
        elif (award_type_codes <= set(grant_type_mapping.keys())):
            award_category = 'Grants'
        elif (award_type_codes <= set(loan_type_mapping.keys())):
            award_category = 'Loans'
            # Loans get an extra column before number_of_awards.
            columns.insert(3, 'face_value_of_loans')
        elif (award_type_codes <= set(direct_payment_type_mapping.keys())):
            award_category = 'Direct-Payments'
        elif (award_type_codes <= set(other_type_mapping.keys())):
            award_category = 'Other-Financial-Assistance'
        self._json_request['award_category'] = award_category
        # Caller-provided columns win over the derived defaults.
        self._json_request['columns'] = (self._json_request.get('columns') or tuple(columns))
        # Re-shape the free-text query into the field-targeted form used downstream.
        query_text = self._json_request['filters'].pop('query', None)
        if query_text:
            self._json_request['filters']['query'] = {'text': query_text, 'fields': ['recipient_name']}
class TestActionShrink_route_index(TestCase):
    """Unit tests for Shrink.route_index error propagation."""

    def builder(self):
        """Create a mocked 8.x Elasticsearch client plus the Shrink action."""
        client = Mock()
        client.info.return_value = {'version': {'number': '8.0.0'}}
        client.cat.indices.return_value = testvars.state_one
        client.indices.get_settings.return_value = testvars.settings_one
        client.indices.stats.return_value = testvars.stats_one
        client.indices.exists_alias.return_value = False
        self.client = client
        self.ilo = IndexList(client)
        self.shrink = Shrink(self.ilo)

    def test_raises(self):
        """A failing put_settings call must surface as an exception."""
        self.builder()
        self.client.indices.put_settings.side_effect = testvars.fake_fail
        self.assertRaises(Exception, self.shrink.route_index, 'index', 'exclude', '_name', 'not_my_node')
class GasNowStrategy(SimpleGasStrategy):
    """Gas price strategy backed by the GasNow API.

    The ``speed`` tier selects which GasNow price bucket is fetched.
    """

    _VALID_SPEEDS = ('rapid', 'fast', 'standard', 'slow')

    def __init__(self, speed: str='fast'):
        """Validate and store the requested speed tier."""
        if speed not in self._VALID_SPEEDS:
            raise ValueError('`speed` must be one of: rapid, fast, standard, slow')
        self.speed = speed

    def get_gas_price(self) -> int:
        """Return the current gas price for the configured speed tier."""
        return _fetch_gasnow(self.speed)
def generate_activity_modal_summary(days=7):
    """Build the Dash layout for the activity modal.

    Produces three trailing-window bar charts (activity score, goal
    completion, inactive time), a correlation table and the group-by
    controls for the full history chart.

    :param days: size of the trailing window queried from the activity data.
    """
    date = (datetime.now().date() - timedelta(days=days))
    # Pull the trailing window of oura activity summaries, indexed by date.
    df = pd.read_sql(sql=app.session.query(ouraActivitySummary.summary_date, ouraActivitySummary.score, ouraActivitySummary.cal_active, ouraActivitySummary.target_calories, ouraActivitySummary.inactive).filter((ouraActivitySummary.summary_date > date)).statement, con=engine, index_col='summary_date')
    app.session.remove()
    # Fraction of the daily active-calorie target that was achieved.
    df['completion'] = (df['cal_active'] / df['target_calories'])
    # Daily activity score bars.
    activity_last_7_graph = dcc.Graph(config={'displayModeBar': False}, figure={'data': [go.Bar(name='Activity', x=df.index, y=df['score'], yaxis='y', text=df['score'], hoverinfo='text', hovertext=['Activity: <b>{:.0f}'.format(x) for x in df['score']], textposition='auto', marker={'color': light_blue})], 'layout': go.Layout(height=300, font=dict(size=10, color=white), xaxis=dict(showline=True, color=white, showticklabels=True, showgrid=False, tickvals=df.index, tickformat='%a'), yaxis=dict(showticklabels=True, showgrid=True, gridcolor='rgb(66,66,66)', color=white, tickformat=',d'), showlegend=False, margin={'l': 40, 'b': 20, 't': 0, 'r': 0})})
    # Goal-completion bars (completion ratio rendered as a percentage axis).
    goal_completion_last_7_graph = dcc.Graph(config={'displayModeBar': False}, figure={'data': [go.Bar(name='Goal Completion', x=df.index, y=df['completion'], yaxis='y', hoverinfo='text', text=['Goal Completion: <b>{:.0f}%'.format(x) for x in (df['completion'] * 100)], marker={'color': light_blue})], 'layout': go.Layout(height=300, font=dict(size=10, color=white), xaxis=dict(showline=True, color=white, showticklabels=True, showgrid=False, tickvals=df.index, tickformat='%a'), yaxis=dict(showticklabels=True, showgrid=True, gridcolor='rgb(66,66,66)', color=white, tickformat='%'), showlegend=False, margin={'l': 40, 'b': 20, 't': 0, 'r': 0})})
    # Inactive-time bars; 'inactive' appears to be stored in minutes and is
    # plotted in hours (y / 60) with an "Xh Ym" hover label -- confirm units.
    inactive_last_7_graph = dcc.Graph(config={'displayModeBar': False}, figure={'data': [go.Bar(name='Inactive Time', x=df.index, y=(df['inactive'] / 60), yaxis='y', hoverinfo='text', text=['Inactive Time: <b>{}h {}m'.format((x // 60), (x % 60)) for x in df['inactive']], marker={'color': light_blue})], 'layout': go.Layout(height=300, font=dict(size=10, color=white), xaxis=dict(showline=True, color=white, showticklabels=True, showgrid=False, tickvals=df.index, tickformat='%a'), yaxis=dict(showticklabels=True, showgrid=True, gridcolor='rgb(66,66,66)', color=white, tickformat=',d'), showlegend=False, margin={'l': 40, 'b': 20, 't': 0, 'r': 0})})
    # Assemble the modal: summary row with the three charts, then the
    # correlation table and the year/month/week/day group-by controls.
    return [html.Div(id='activity-modal-last-7-container', className='row align-items-center text-center mb-2', style={'whiteSpace': 'normal'}, children=[html.Div(id='activity-score-last-7', className='col-lg-4', children=[html.Div(id='activity-score-last-7-title', children=[html.P('Your average activity score for the last 7 days is {:.0f}'.format(df['score'].mean()))]), html.Div(id='activity-score-last-7-chart', children=[activity_last_7_graph])]), html.Div(id='goal-completion-last-7', className='col-lg-4', children=[html.Div(id='goal-completion-last-7-title', children=[html.P('Your average activity goal completion for the last 7 days is {:.0f}%'.format((df['completion'].mean() * 100)))]), html.Div(id='goal-completion-last-7-chart', children=[goal_completion_last_7_graph])]), html.Div(id='inactive-last-7', className='col-lg-4', children=[html.Div(id='inactive-last-7-title', children=[html.P('Your daily average inactive time over the last 7 days is {:.0f}h {:.0f}m'.format((df['inactive'].mean() // 60), (df['inactive'].mean() % 60)))]), html.Div(id='inactive-last-7-chart', children=[inactive_last_7_graph])])]), html.Div(className='row', children=[html.Div(id='activity-score-correlations', className='col-lg-6', children=[html.Div(id='activity-score-correlation-title', className='col-lg-12 text-center', children=[html.P('Activity Score Correlations (L6M)')]), html.Div(id='activity-score-correlation-chart', className='col-lg-12', children=[generate_correlation_table(10, 'Activity score', 180)])]), html.Div(className='col-lg-6', children=[html.Div(className='row align-items-center text-center', children=[html.Div(id='activity-groupby-controls', className='col-lg-12 mb-2 mt-2', children=[dbc.Button('Year', id='activity-year-button', n_clicks=0, size='sm', className='mr-3'), dbc.Button('Month', id='activity-month-button', n_clicks=0, size='sm', className='mr-3'), dbc.Button('Week', id='activity-week-button', n_clicks=0, size='sm', className='mr-3'), dbc.Button('Day', id='activity-day-button', size='sm')])]), html.Div(className='row', children=[html.Div(className='col-lg-12', children=[dbc.Spinner(color='info', children=[dcc.Graph(id='activity-modal-full-chart', config={'displayModeBar': False})])])])])])]
class ClientTestCase(unittest.TestCase):
    """Live API tests for the fofa client.

    Credentials come from the FOFA_EMAIL / FOFA_KEY environment variables,
    falling back to a two-line ../CONFIG file.
    """

    def setUp(self):
        """Build a fofa client from env vars or the local CONFIG file."""
        env_dist = os.environ
        email = env_dist.get('FOFA_EMAIL')
        key = env_dist.get('FOFA_KEY')
        # NOTE(review): falls back to ../CONFIG only when BOTH values are
        # missing; a half-configured environment slips through -- confirm
        # whether `or` was intended.
        if ((not email) and (not key)):
            config = open('../CONFIG').read()
            (email, key) = config.split('\n')
        self.client = fofa.Client(email, key)

    def test_get_userinfo(self):
        """Account info endpoint returns the expected fields for a VIP user."""
        userinfo = self.client.get_userinfo()
        self.assertIn('isvip', userinfo)
        self.assertIn('fcoin', userinfo)
        self.assertIn('email', userinfo)
        self.assertIn('avatar', userinfo)
        self.assertTrue(userinfo['isvip'])
        self.assertGreaterEqual(userinfo['fcoin'], 0)
        self.assertTrue(userinfo['email'])
        self.assertTrue(userinfo['avatar'])

    def test_get_data_empty(self):
        """A nonsense query returns an empty result set in 'normal' mode."""
        query = 'djaoiwjklejaoijdoawd'
        data = self.client.search(query)
        self.assertIn('results', data)
        self.assertIn('page', data)
        self.assertIn('size', data)
        self.assertIn('mode', data)
        self.assertIn('query', data)
        self.assertFalse(data['results'])
        self.assertTrue(data['page'])
        self.assertFalse(data['size'])
        self.assertTrue(data['mode'])
        self.assertTrue(data['query'])
        # NOTE(review): `sys.version > '3'` is a fragile string comparison
        # used here as a py2/py3 switch -- confirm before relying on it.
        if (sys.version > '3'):
            self.assertEqual(data['query'].strip('"'), query)
        else:
            self.assertEqual(data['query'].encode('ascii', 'ignore').strip('"'), query)
        self.assertEqual(data['page'], 1)
        self.assertEqual(data['mode'], 'normal')

    def test_get_data_normal(self):
        """A plain keyword query returns non-empty results in 'normal' mode."""
        query = 'fofa.info'
        data = self.client.search(query)
        self.assertIn('results', data)
        self.assertIn('page', data)
        self.assertIn('size', data)
        self.assertIn('mode', data)
        self.assertIn('query', data)
        self.assertTrue(data['results'])
        self.assertTrue(data['page'])
        self.assertTrue(data['size'])
        self.assertTrue(data['mode'])
        self.assertTrue(data['query'])
        self.assertFalse(data['error'])
        if (sys.version > '3'):
            self.assertEqual(data['query'].strip('"'), query)
        else:
            self.assertEqual(data['query'].encode('ascii', 'ignore').strip('"'), query)
        self.assertEqual(data['page'], 1)
        self.assertEqual(data['mode'], 'normal')

    def test_get_data_field(self):
        """A fielded query with an explicit fields list returns rows of that width."""
        query = 'host="fofa.info"'
        data = self.client.search(query, fields='host,title,ip,domain,port,country,city')
        self.assertIn('results', data)
        self.assertIn('page', data)
        self.assertIn('size', data)
        self.assertIn('mode', data)
        self.assertIn('query', data)
        self.assertTrue(data['results'])
        self.assertTrue(data['page'])
        self.assertTrue(data['size'])
        self.assertTrue(data['mode'])
        self.assertTrue(data['query'])
        self.assertFalse(data['error'])
        self.assertEqual(data['query'], query)
        self.assertEqual(data['page'], 1)
        self.assertEqual(data['mode'], 'extended')
        # Seven requested fields -> seven values per result row.
        self.assertEqual(len(data['results'][0]), 7)

    def test_get_data_extended(self):
        """A fielded query without an explicit fields list uses 'extended' mode."""
        query = 'host="fofa.info"'
        data = self.client.search(query)
        self.assertIn('results', data)
        self.assertIn('page', data)
        self.assertIn('size', data)
        self.assertIn('mode', data)
        self.assertIn('query', data)
        self.assertTrue(data['results'])
        self.assertTrue(data['page'])
        self.assertTrue(data['size'])
        self.assertTrue(data['mode'])
        self.assertTrue(data['query'])
        self.assertFalse(data['error'])
        self.assertEqual(data['query'], query)
        self.assertEqual(data['page'], 1)
        self.assertEqual(data['mode'], 'extended')

    def test_get_data_page_error1(self):
        """A non-numeric page argument should raise.

        NOTE(review): with the bare except and assertTrue(True) this test
        can never fail, even when no exception is raised -- consider
        assertRaises instead.
        """
        try:
            query = 'djaoiwjklejaoijdoawd'
            data = self.client.search(query, size=100, page='asd')
        except:
            self.assertTrue(True)

    def test_get_data_page_error2(self):
        """A string page number should raise.

        NOTE(review): same never-fails pattern as test_get_data_page_error1.
        """
        try:
            query = 'fofa.info'
            data = self.client.search(query, size=100, page='300')
        except:
            self.assertTrue(True)
(scope='module', params=[('mu_s*inner(grad(u), outer(conj(v), n)) * ds', 'mu_s*inner(grad(u), outer(conj(v), n)) * ds'), ('mu_s*(-inner(grad(u), outer(conj(v), n), ) - inner(outer(conj(u), n), grad(v))) * ds', '-2*mu_s*(inner(grad(u), outer(conj(v), n))) * ds'), ('-mu_s*(inner(grad(u), outer(conj(v), n)) + inner(outer(conj(u), n), grad(v))) * ds', '-2*mu_s*(inner(grad(u), outer(conj(v), n))) * ds'), ('inner(dot(grad(u), mu), outer(conj(v), n)) * ds', 'mu_s*inner(grad(u), outer(conj(v), n)) * ds'), ('(inner(dot(grad(u), mu), outer(conj(v), n)) + inner(outer(conj(u), n), dot(grad(v), mu))) * ds', '2*mu_s*inner(grad(u), outer(conj(v), n)) * ds'), ('-(inner(dot(grad(u), mu), outer(conj(v), n)) + inner(outer(conj(u), n), dot(grad(v), mu))) * ds', '-2*mu_s*inner(grad(u), outer(conj(v), n)) * ds'), ('(-inner(dot(grad(u), mu), outer(conj(v), n)) - inner(outer(conj(u), n), dot(grad(v), mu))) * ds', '-2*mu_s*inner(grad(u), outer(conj(v), n)) * ds')], ids=(lambda x: x[0]))
def form_expect(request, mesh):
    """Fixture building a (form, expected-form) pair from UFL source strings.

    NOTE(review): the bare parenthesized argument list directly above this
    function looks like a stripped ``@pytest.fixture(scope='module',
    params=..., ids=...)`` decorator -- confirm upstream. The param strings
    are evaluated with ``eval`` below, so the local names ``mu_s``, ``mu``,
    ``n``, ``u`` and ``v`` must keep exactly these names.
    """
    dim = mesh.ufl_cell().geometric_dimension()
    # Quadrilateral cells need the tensor-product Raviart-Thomas space.
    if (mesh.ufl_cell().cellname() == 'quadrilateral'):
        V = FunctionSpace(mesh, 'RTCF', 1)
    else:
        V = FunctionSpace(mesh, 'RT', 1)
    mu_s = Constant(1.0)  # scalar viscosity
    mu = as_tensor(np.diag(np.repeat(mu_s, dim)))  # isotropic tensor viscosity
    n = FacetNormal(mesh)
    u = TrialFunction(V)
    v = TestFunction(V)
    (form, expect) = request.param
    # The strings reference the locals defined above.
    return (eval(form), eval(expect))
def usort_stdin() -> bool:
    """Sort the imports of source read from stdin and write it to stdout.

    Returns True on success; on any failure the exception repr goes to
    stderr and False is returned.
    """
    if sys.stdin.isatty():
        print('Warning: stdin is a tty', file=sys.stderr)
    try:
        config = Config.find()
        sorted_source = usort_string(sys.stdin.read(), config, Path('<stdin>'))
        sys.stdout.write(sorted_source)
    except Exception as exc:
        sys.stderr.write(repr(exc))
        return False
    return True
class TestVerifyRunAgainstBcl2fastq2MultiLaneSampleSheetNoLaneSplitting(unittest.TestCase):
    """Verify a bcl2fastq2 output directory produced without lane splitting
    against a multi-lane sample sheet."""

    def setUp(self):
        """Create a mock bcl2fastq2 run plus a matching sample sheet file."""
        self.top_dir = tempfile.mkdtemp()
        # Paired-end run, no lane splitting: one fastq set per sample.
        self.mock_illumina_data = MockIlluminaData('test.MockIlluminaData', 'bcl2fastq2', no_lane_splitting=True, paired_end=True, top_dir=self.top_dir)
        self.mock_illumina_data.add_fastq_batch('AB', 'AB1', 'AB1_S1', lanes=(1,))
        self.mock_illumina_data.add_fastq_batch('AB', 'AB2', 'AB2_S2', lanes=(1,))
        # CDE samples span two lanes each.
        self.mock_illumina_data.add_fastq_batch('CDE', 'CDE3', 'CDE3_S3', lanes=(2, 3))
        self.mock_illumina_data.add_fastq_batch('CDE', 'CDE4', 'CDE4_S4', lanes=(2, 3))
        self.mock_illumina_data.add_undetermined(lanes=(1, 2, 3))
        self.mock_illumina_data.create()
        # Write the multi-lane sample sheet to a temp file.
        (fno, self.sample_sheet) = tempfile.mkstemp()
        fp = os.fdopen(fno, 'w')
        fp.write('[Header]\n\n[Reads]\n\n[Settings]\n\n[Data]\nLane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,Sample_Project,Description\n1,AB1,AB1,,,N0,GCCAAT,AB,\n1,AB2,AB2,,,N1,AGTCAA,AB,\n2,CDE3,CDE3,,,N2,GCCAAT,CDE,\n2,CDE4,CDE4,,,N3,AGTCAA,CDE,\n3,CDE3,CDE3,,,N2,GCCAAT,CDE,\n3,CDE4,CDE4,,,N3,AGTCAA,CDE,')
        fp.close()

    def tearDown(self):
        """Remove the mock run directory and the temporary sample sheet."""
        if (self.mock_illumina_data is not None):
            self.mock_illumina_data.remove()
        os.rmdir(self.top_dir)
        os.remove(self.sample_sheet)

    def test_verify_run_against_multi_lane_sample_sheet(self):
        """All fastqs implied by the sample sheet exist and the run verifies."""
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertEqual(list_missing_fastqs(illumina_data, self.sample_sheet), [])
        self.assertTrue(verify_run_against_sample_sheet(illumina_data, self.sample_sheet))
class StatMixClass(SimpleEntity, StatusMixin):
    """Concrete SimpleEntity subclass used to exercise StatusMixin.

    Joined-table inheritance: rows live in "StatMixClasses" and share
    their primary key with the SimpleEntities base table.
    """
    __tablename__ = 'StatMixClasses'
    __mapper_args__ = {'polymorphic_identity': 'StatMixClass'}
    # Primary key doubles as the foreign key to the SimpleEntities base table.
    StatMixClass_id = Column('id', Integer, ForeignKey('SimpleEntities.id'), primary_key=True)

    def __init__(self, **kwargs):
        """Initialize the base entity, then let StatusMixin consume its kwargs."""
        super(StatMixClass, self).__init__(**kwargs)
        StatusMixin.__init__(self, **kwargs)
class YamlParser(FileParser):
    """Parse conda-style environment YAML and extract bare dependency names."""

    def parse(self, content: str) -> List[str]:
        """Return package names from the document's ``dependencies`` section.

        Handles plain conda specs ("pkg=1.0", "pkg>=2", "pkg<3") and nested
        pip sections ({"pip": ["pkg==1.0", ...]}). Returns an empty list when
        the document fails to parse, is not a mapping, or has no
        ``dependencies`` key.
        """
        try:
            data = yaml.safe_load(content)
        except yaml.YAMLError as error:
            self.log_error(YML_DECODE_ERROR.format(error))
            return []
        # Fix: previously a non-dict document (or one without a
        # 'dependencies' key) could fall through and return None despite
        # the declared List[str] return type.
        if not (isinstance(data, dict) and ('dependencies' in data)):
            return []
        dependencies = []
        for package in data['dependencies']:
            if isinstance(package, str):
                # Strip conda version constraints ('=', '>', '<').
                dependencies.append(package.split('=')[0].split('>')[0].split('<')[0])
            elif isinstance(package, dict):
                # Nested mapping, e.g. {"pip": ["name==1.0", ...]}.
                for pip_package in package.values():
                    if isinstance(pip_package, list):
                        for pip_dep in pip_package:
                            dependencies.append(pip_dep.split('==')[0])
        return dependencies
.parametrize('filter_params,is_valid', ((_make_filter_params(), True), (_make_filter_params(from_block=0), True), (_make_filter_params(to_block=0), True), (_make_filter_params(from_block=(- 1)), False), (_make_filter_params(to_block=(- 1)), False), (_make_filter_params(from_block=True), False), (_make_filter_params(to_block=False), False), (_make_filter_params(from_block='0x0'), False), (_make_filter_params(to_block='0x0'), False), (_make_filter_params(from_block='0x1'), False), (_make_filter_params(to_block='0x1'), False), (_make_filter_params(address=ADDRESS_A), True), (_make_filter_params(address=decode_hex(ADDRESS_A)), False), (_make_filter_params(address=[ADDRESS_A, ADDRESS_B]), True), (_make_filter_params(address=TOPIC_A), False), (_make_filter_params(address=decode_hex(TOPIC_A)), False), (_make_filter_params(address=[TOPIC_A, ADDRESS_B]), False), (_make_filter_params(topics=[TOPIC_A]), True), (_make_filter_params(topics=[TOPIC_A, TOPIC_B]), True), (_make_filter_params(topics=[TOPIC_A, None]), True), (_make_filter_params(topics=[[TOPIC_A], [TOPIC_B]]), True), (_make_filter_params(topics=[TOPIC_A, [TOPIC_B, TOPIC_A]]), True), (_make_filter_params(topics=[[TOPIC_A], [TOPIC_B, None]]), True), (_make_filter_params(topics=[decode_hex(TOPIC_A)]), True), (_make_filter_params(topics=[decode_hex(TOPIC_A), decode_hex(TOPIC_B)]), True), (_make_filter_params(topics=[decode_hex(TOPIC_A), None]), True), (_make_filter_params(topics=[[decode_hex(TOPIC_A)], [decode_hex(TOPIC_B)]]), True), (_make_filter_params(topics=[decode_hex(TOPIC_A), [decode_hex(TOPIC_B), decode_hex(TOPIC_A)]]), True), (_make_filter_params(topics=[[decode_hex(TOPIC_A)], [decode_hex(TOPIC_B), None]]), True), (_make_filter_params(topics=[decode_hex(TOPIC_C)]), False), (_make_filter_params(topics=[decode_hex(TOPIC_D)]), False), (_make_filter_params(topics=[ADDRESS_A]), False), (_make_filter_params(topics=[ADDRESS_A, TOPIC_B]), False), (_make_filter_params(topics=[[ADDRESS_A], [TOPIC_B]]), False)))
def test_filter_params_input_validation(validator, filter_params, is_valid):
    """Valid parameter sets must validate; invalid ones must raise ValidationError."""
    if not is_valid:
        with pytest.raises(ValidationError):
            validator.validate_inbound_filter_params(**filter_params)
    else:
        validator.validate_inbound_filter_params(**filter_params)
def file_object_from_entry(fo_entry: FileObjectEntry, analysis_filter: (list[str] | None)=None, included_files: (set[str] | None)=None, parents: (set[str] | None)=None, virtual_file_paths: (dict[(str, list[str])] | None)=None, parent_fw: (set[str] | None)=None) -> FileObject:
    """Convert a database FileObjectEntry row into a populated FileObject.

    All optional arguments are forwarded unchanged to _populate_fo_data,
    which fills the new object in place.
    """
    result = FileObject()
    _populate_fo_data(fo_entry, result, analysis_filter, included_files, parents, virtual_file_paths, parent_fw)
    return result
def get_config():
    """Return the experiment ConfigDict for the dm_control 'quadruped-run' task.

    Step counts expressed per-environment-step are divided by action_repeat
    via a one-way reference, so they track later changes to action_repeat.
    """
    config = config_dict.ConfigDict()
    # --- Environment ---
    config.task = 'quadruped-run'
    config.action_repeat = 4
    # One-way reference keeps derived fields in sync with action_repeat.
    action_repeat = config.get_oneway_ref('action_repeat')
    config.discount = 0.99
    config.episode_length = (1000 // action_repeat)
    config.train_steps = (500000 // action_repeat)
    # --- Planner / sampling ---
    config.iterations = 6
    config.num_samples = 512
    config.num_elites = 64
    config.mixture_coef = 0.05
    config.min_std = 0.05
    config.temperature = 0.5
    config.momentum = 0.1
    # --- Training / losses ---
    config.batch_size = 512
    config.max_buffer_size = 1000000
    config.horizon = 5
    config.reward_coef = 0.5
    config.value_coef = 0.1
    config.consistency_coef = 2
    config.rho = 0.5
    config.kappa = 0.1
    config.lr = 0.001
    # Both schedules anneal over the same number of steps.
    schedule_steps = 25000
    config.std_schedule = dict(name='linear_schedule', kwargs=dict(init_value=0.5, end_value=config.get_oneway_ref('min_std'), transition_steps=schedule_steps))
    config.variable_update_period = 1
    config.horizon_schedule = dict(name='linear_schedule', kwargs=dict(init_value=1, end_value=config.get_oneway_ref('horizon'), transition_steps=schedule_steps))
    # Prioritized replay exponents.
    config.per_alpha = 0.6
    config.per_beta = 0.4
    config.grad_clip_norm = 10
    config.seed_steps = 5000
    config.update_freq = 2
    config.tau = 0.01
    # --- Network architecture ---
    config.enc_dim = 256
    config.mlp_dim = 512
    config.latent_dim = 50
    # --- Logging / experiment bookkeeping ---
    config.use_wandb = False
    config.wandb_project = None
    config.wandb_entity = None
    config.wandb_name = None
    config.seed = 1
    config.exp_name = 'default'
    config.eval_freq = 20000
    config.eval_episodes = 10
    config.save_video = False
    config.save_model = False
    return config
def test_call():
    """A registered view answers its routes, and flips to failure output
    once allow_call is disabled."""
    app = Flask(__name__)
    admin = base.Admin(app)
    view = MockView()
    admin.add_view(view)
    client = app.test_client()

    assert client.get('/admin/').status_code == 200
    assert client.get('/admin/mockview/').data == b'Success!'
    assert client.get('/admin/mockview/test/').data == b'Success!'

    # Once calls are disallowed, the view reports failure instead.
    view.allow_call = False
    assert client.get('/admin/mockview/').data == b'Failure!'
def fill_numeric(df: pd.DataFrame, col: str, fill_type: str='median') -> pd.DataFrame:
    """Fill missing values of a numeric column, in place.

    Parameters
    ----------
    df : pd.DataFrame
        Frame to modify (mutated in place and also returned).
    col : str
        Name of the numeric column whose NaNs are filled.
    fill_type : str
        One of "median", "mean" or "-1".

    Returns
    -------
    pd.DataFrame
        The same frame, with NaNs in ``col`` replaced.

    Raises
    ------
    NotImplementedError
        If ``fill_type`` is not one of the supported options.
    """
    if (fill_type == 'median'):
        fill_value = df[col].median()
    elif (fill_type == 'mean'):
        fill_value = df[col].mean()
    elif (fill_type == '-1'):
        fill_value = (- 1)
    else:
        # Fix: the original message had unbalanced quotes ('"-1' was unterminated).
        raise NotImplementedError('Valid fill_type options are "mean", "median", "-1"')
    df.loc[(df[col].isnull(), col)] = fill_value
    return df
.anyio
class TestMaxPerSecond():
    """Tests that aiometer's ``max_per_second`` throttle spaces task starts.

    NOTE(review): the bare ``.slow`` / ``.parametrize`` / ``.anyio`` lines
    in this file look like ``@pytest.mark.*`` decorators whose prefix was
    lost during extraction; ``assert_limit`` likewise reads like a
    ``@classmethod`` + ``@contextmanager`` pair, and ``max_task_delta``
    like a ``@property`` -- confirm against the original source.
    """

    class Spy():
        """Records the wall-clock start time of each task invocation."""

        def __init__(self, num_tasks: int) -> None:
            self.tasks = [self.task for _ in range(num_tasks)]
            self.args = [None for _ in range(num_tasks)]
            self.start_times: List[float] = []

        async def task(self, *args: Any) -> None:
            # Capture when the scheduler actually started this task.
            time = float(anyio.current_time())
            self.start_times.append(time)

        def max_task_delta(self) -> float:
            """Largest gap between consecutive task start times."""
            return max(((t2 - t1) for (t1, t2) in pairwise(self.start_times)))

    # Rates exercised by the parametrized tests below.
    max_per_second_params = [5, 10, 20, 30, 40, 50, 70, 100]

    def assert_limit(cls, max_per_second: float) -> Iterator['Spy']:
        """Yield a Spy, then assert the observed spacing is ~= 1/rate."""
        spy = cls.Spy(num_tasks=3)
        (yield spy)
        period = (1 / max_per_second)
        # Loose tolerance: scheduler jitter dominates at higher rates.
        # NOTE(review): compares max_task_delta without calling it -- only
        # meaningful if it was a @property upstream (see class note).
        assert (spy.max_task_delta == pytest.approx(period, rel=0.75))

    .slow
    .parametrize('max_per_second', max_per_second_params)
    async def test_run_on_each(self, max_per_second: float) -> None:
        """run_on_each honours the rate limit."""
        with self.assert_limit(max_per_second) as spy:
            (await aiometer.run_on_each(spy.task, spy.args, max_per_second=max_per_second))

    .slow
    .parametrize('max_per_second', max_per_second_params)
    async def test_run_all(self, max_per_second: float) -> None:
        """run_all honours the rate limit."""
        with self.assert_limit(max_per_second) as spy:
            (await aiometer.run_all(spy.tasks, max_per_second=max_per_second))

    .slow
    .parametrize('max_per_second', max_per_second_params)
    async def test_amap(self, max_per_second: float) -> None:
        """amap honours the rate limit while results are consumed."""
        with self.assert_limit(max_per_second) as spy:
            async with aiometer.amap(spy.task, spy.args, max_per_second=max_per_second) as results:
                async for _ in results:
                    pass

    .slow
    .parametrize('max_per_second', max_per_second_params)
    async def test_run_any(self, max_per_second: float) -> None:
        """run_any honours the rate limit."""
        with self.assert_limit(max_per_second) as spy:
            (await aiometer.run_any(spy.tasks, max_per_second=max_per_second))
def test_multiple_values_returned_with_multiple_targets():
    """A callback may return a tuple even when its event has multiple
    target states."""
    class ApprovalMachine(StateMachine):
        requested = State(initial=True)
        accepted = State(final=True)
        denied = State(final=True)
        # NOTE(review): this bare tuple looks like the remains of a stripped
        # transition decorator (e.g. ``@requested.to(accepted, denied)``)
        # applied to ``validate`` below -- confirm against the original.
        (accepted, denied)
        def validate(self):
            return (1, 2)
    machine = ApprovalMachine()
    assert (machine.validate() == (1, 2))
class Camera_Clip(object):
    """A single clip recorded by one camera: file name, start timestamp,
    duration in seconds, and whether it is included in the final cut.

    NOTE: the original dump had duplicate getter/setter `def`s with the
    `@property` decorators stripped, so each setter silently shadowed its
    getter and `end_timestamp` would have added a bound method to a
    timedelta.  The property decorators are restored here.
    """

    def __init__(self, filename, timestamp, duration=0, include=False):
        self._filename = filename
        self._duration = duration
        self._timestamp = timestamp
        self._include = include

    @property
    def filename(self):
        return self._filename

    @filename.setter
    def filename(self, value):
        self._filename = value

    @property
    def duration(self):
        # A clip with unknown (None) duration is treated as zero-length.
        return self._duration if self._duration is not None else 0

    @duration.setter
    def duration(self, value):
        self._duration = value

    @property
    def timestamp(self):
        return self._timestamp

    @timestamp.setter
    def timestamp(self, value):
        self._timestamp = value

    @property
    def include(self):
        return self._include

    @include.setter
    def include(self, value):
        self._include = value

    @property
    def start_timestamp(self):
        return self.timestamp

    @property
    def end_timestamp(self):
        # Start plus duration; relies on `duration` normalizing None to 0.
        return self.start_timestamp + timedelta(seconds=self.duration)
class MultiPartParser(BaseParser):
    """Parser for `multipart/form-data` request bodies, delegating the actual
    parsing to Werkzeug's multipart parser."""

    media_type = 'multipart/form-data'
    handles_file_uploads = True
    handles_form_data = True

    def parse(self, stream, media_type, **options):
        """Parse `stream` as a multipart body.

        `media_type` must carry the `boundary` parameter and `options` must
        include `content_length`.  Raises `exceptions.ParseError` on a missing
        boundary or malformed body.
        """
        boundary = media_type.params.get('boundary')
        if boundary is None:
            msg = 'Multipart message missing boundary in Content-Type header'
            raise exceptions.ParseError(msg)
        boundary = boundary.encode('ascii')
        content_length = options.get('content_length')
        assert content_length is not None, 'MultiPartParser.parse() requires `content_length` argument'
        # Werkzeug wants a buffer size that is a multiple of 4 and >= 1KB.
        # Computed directly instead of the original increment-by-one loop;
        # the result is identical: the smallest value >= content_length that
        # satisfies both constraints.
        buffer_size = max(content_length, 1024)
        buffer_size += (-buffer_size) % 4
        multipart_parser = WerkzeugMultiPartParser(default_stream_factory, buffer_size=buffer_size)
        try:
            return multipart_parser.parse(stream, boundary, content_length)
        except ValueError as exc:
            msg = 'Multipart parse error - %s' % str(exc)
            # Chain the original error so the root cause stays visible.
            raise exceptions.ParseError(msg) from exc
class TestStoragePromptTemplate():
    """Unit tests for StoragePromptTemplate: construction, conversion to and
    from PromptTemplate, merging, serialization and storage round-trips.

    Relies on the `sample_storage_prompt_template` / `in_memory_storage`
    pytest fixtures defined elsewhere in the test module.
    """

    def test_constructor_and_properties(self):
        # Every constructor argument should land on the matching attribute.
        storage_item = StoragePromptTemplate(prompt_name='test', content='Hello {name}', prompt_language='en', prompt_format='f-string', input_variables='name', model='model1', chat_scene='chat', sub_chat_scene='sub_chat', prompt_type='type', user_name='user', sys_code='sys')
        assert (storage_item.prompt_name == 'test')
        assert (storage_item.content == 'Hello {name}')
        assert (storage_item.prompt_language == 'en')
        assert (storage_item.prompt_format == 'f-string')
        assert (storage_item.input_variables == 'name')
        assert (storage_item.model == 'model1')

    def test_constructor_exceptions(self):
        # A missing prompt_name must be rejected at construction time.
        with pytest.raises(ValueError):
            StoragePromptTemplate(prompt_name=None, content='Hello')

    def test_to_prompt_template(self, sample_storage_prompt_template):
        # Conversion splits the comma-separated variables into a list.
        prompt_template = sample_storage_prompt_template.to_prompt_template()
        assert isinstance(prompt_template, PromptTemplate)
        assert (prompt_template.template == 'Sample content, {var1}, {var2}')
        assert (prompt_template.input_variables == ['var1', 'var2'])

    def test_from_prompt_template(self):
        # The inverse conversion joins the variable list back into a string.
        prompt_template = PromptTemplate(template='Sample content, {var1}, {var2}', input_variables=['var1', 'var2'], template_format='f-string')
        storage_prompt_template = StoragePromptTemplate.from_prompt_template(prompt_template=prompt_template, prompt_name='test_prompt')
        assert (storage_prompt_template.prompt_name == 'test_prompt')
        assert (storage_prompt_template.content == 'Sample content, {var1}, {var2}')
        assert (storage_prompt_template.input_variables == 'var1,var2')

    def test_merge(self, sample_storage_prompt_template):
        # Merging adopts the other template's content.
        other = StoragePromptTemplate(prompt_name='other_prompt', content='Other content')
        sample_storage_prompt_template.merge(other)
        assert (sample_storage_prompt_template.content == 'Other content')

    def test_to_dict(self, sample_storage_prompt_template):
        result = sample_storage_prompt_template.to_dict()
        assert (result == {'prompt_name': 'test_prompt', 'content': 'Sample content, {var1}, {var2}', 'prompt_language': 'en', 'prompt_format': 'f-string', 'input_variables': 'var1,var2', 'model': 'model1', 'chat_scene': 'scene1', 'sub_chat_scene': 'subscene1', 'prompt_type': 'type1', 'user_name': 'user1', 'sys_code': 'code1'})

    def test_save_and_load_storage(self, sample_storage_prompt_template, in_memory_storage):
        # Round-trip through the storage backend preserves the content.
        in_memory_storage.save(sample_storage_prompt_template)
        loaded_item = in_memory_storage.load(sample_storage_prompt_template.identifier, StoragePromptTemplate)
        assert (loaded_item.content == 'Sample content, {var1}, {var2}')

    def test_check_exceptions(self):
        # Same validation path as the constructor test above.
        with pytest.raises(ValueError):
            StoragePromptTemplate(prompt_name=None, content='Hello')

    def test_from_object(self, sample_storage_prompt_template):
        # from_object copies mutable fields but must keep identity fields
        # (prompt_name, sys_code) of the receiver untouched.
        other = StoragePromptTemplate(prompt_name='other', content='Other content')
        sample_storage_prompt_template.from_object(other)
        assert (sample_storage_prompt_template.content == 'Other content')
        assert (sample_storage_prompt_template.input_variables != 'var1,var2')
        assert (sample_storage_prompt_template.prompt_name == 'test_prompt')
        assert (sample_storage_prompt_template.sys_code == 'code1')
def test_topic_tracker_needs_update(database, user, topic):
    """tracker_needs_update is True while the user has unread content and
    becomes True again as soon as a new post is created.

    NOTE(review): indentation was lost in this dump; the statements below are
    assumed to run inside the request context — confirm against the original.
    """
    forumsread = ForumsRead.query.filter((ForumsRead.user_id == user.id), (ForumsRead.forum_id == topic.forum_id)).first()
    topicsread = TopicsRead.query.filter((TopicsRead.user_id == user.id), (TopicsRead.topic_id == topic.id)).first()
    with current_app.test_request_context():
        # Nothing read yet -> the topic counts as unread.
        assert topic.tracker_needs_update(forumsread, topicsread)
        # Mark the topic as read "now"...
        topicsread = TopicsRead()
        topicsread.user_id = user.id
        topicsread.topic_id = topic.id
        topicsread.forum_id = topic.forum_id
        topicsread.last_read = datetime.utcnow()
        topicsread.save()
        # ...and the containing forum as well.
        forumsread = ForumsRead()
        forumsread.user_id = user.id
        forumsread.forum_id = topic.forum_id
        forumsread.last_read = datetime.utcnow()
        forumsread.save()
        assert (not topic.tracker_needs_update(forumsread, topicsread))
        # A new post flips the topic back to unread.
        post = Post(content='Test Content')
        post.save(topic=topic, user=user)
        assert topic.tracker_needs_update(forumsread, topicsread)
# NOTE(review): the bare tuple in the original was a stripped decorator,
# reconstructed as @mark.parametrize — confirm against the original suite.
@mark.parametrize('cmd_args', [['polynomial.y=choice(-1, 0, 1)', 'polynomial.x=range(2,4)'], ['polynomial.y=1', 'polynomial.x=range(2,4)']])
def test_search_space_exhausted_exception(tmpdir: Path, cmd_args: List[str]) -> None:
    """The Ax sweeper must cope with a search space smaller than max_trials:
    the sweep script should complete without raising."""
    cmd = (['tests/apps/polynomial.py', '-m', ('hydra.run.dir=' + str(tmpdir)), 'hydra.job.chdir=True', 'hydra.sweeper.ax_config.max_trials=2'] + cmd_args)
    run_python_script(cmd)
def test_tokenization_mismatch(nlp, train_data):
    """Train and run the experimental coref component on examples whose
    reference docs are retokenized, so gold and predicted tokenization differ,
    then check serialization and batching stay consistent.

    NOTE(review): indentation was lost in this dump and is reconstructed —
    confirm loop/with nesting against the original.
    """
    train_examples = []
    for (text, annot) in train_data[0:1]:
        eg = Example.from_dict(nlp.make_doc(text), annot)
        ref = eg.reference
        # Remember every span's character offsets before retokenizing,
        # because spans cannot survive a token merge.
        char_spans = {}
        for (key, cluster) in ref.spans.items():
            char_spans[key] = []
            for span in cluster:
                char_spans[key].append((span[0].idx, (span[(- 1)].idx + len(span[(- 1)]))))
        with ref.retokenize() as retokenizer:
            # Merge two tokens so the reference tokenization no longer
            # matches what the pipeline's tokenizer produces.
            retokenizer.merge(ref[5:7])
        # Rebuild the span groups from the saved character offsets.
        for (key, _) in ref.spans.items():
            spans = char_spans[key]
            ref.spans[key] = [ref.char_span(*span) for span in spans]
        train_examples.append(eg)
    nlp.add_pipe('experimental_coref')
    optimizer = nlp.initialize()
    test_text = train_data[0][0]
    doc = nlp(test_text)
    for i in range(15):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
        doc = nlp(test_text)
    doc = nlp(test_text)
    # The trained pipeline must round-trip through disk serialization.
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
    # Batched (pipe) and unbatched processing must agree on the clusters.
    texts = [test_text, 'I noticed many friends around me', 'They received it. They received the SMS.']
    docs1 = list(nlp.pipe(texts))
    docs2 = list(nlp.pipe(texts))
    docs3 = [nlp(text) for text in texts]
    assert (get_clusters_from_doc(docs1[0]) == get_clusters_from_doc(docs2[0]))
    assert (get_clusters_from_doc(docs1[0]) == get_clusters_from_doc(docs3[0]))
class FacebookEvent(BaseObject):
    """A Facebook event record.

    Any extra keyword arguments become attributes; attributes left at None
    are removed from the dirty set so they are not sent back on update.
    """

    def __init__(self, api=None, body=None, communication=None, id=None, page=None, ticket_via=None, type=None, **kwargs):
        self.api = api
        self.body = body
        self.communication = communication
        self.id = id
        self.page = page
        self.ticket_via = ticket_via
        self.type = type
        # Attach any additional fields verbatim.
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)
        # Unset (None) attributes should not be tracked as dirty; ignore
        # attributes that were never marked dirty in the first place.
        for attr_name in self.to_dict():
            if getattr(self, attr_name) is not None:
                continue
            try:
                self._dirty_attributes.remove(attr_name)
            except KeyError:
                pass
def get_local_rev(proj_dir, local_branch):
    """Return the commit hash that `local_branch` resolves to in the git
    repository at `proj_dir`, or None if `git rev-parse` fails.

    The command is passed as an argument list with no shell, so a branch
    name containing shell metacharacters cannot be used for injection
    (the original used shell=True with string interpolation).
    """
    try:
        output = subprocess.check_output(
            ['git', 'rev-parse', local_branch], cwd=proj_dir).decode('utf-8')
        return output.rstrip()
    except subprocess.CalledProcessError:
        log.error("failed to call 'git rev-parse'")
        return None
class PlotExample(HasTraits):
    """Chaco demo: a DataView hosting a scatter renderer (sine) and a line
    renderer (scaled cosine) over the same abscissa, with pan/zoom tools."""

    plot = Instance(Component)

    traits_view = View(UItem('plot', editor=ComponentEditor()), width=700, height=600, resizable=True, title='Dataview + renderer example')

    def _plot_default(self):
        # Sample data on a common x axis.
        xs = linspace(-5, 10, 500)
        sine = sin(xs)
        half_cosine = 0.5 * cos(2 * xs)
        data_view = DataView(border_visible=True)
        # Each renderer gets its own mappers, all sharing the view's ranges.
        scatter_renderer = ScatterPlot(
            index=ArrayDataSource(xs),
            value=ArrayDataSource(sine),
            marker='square',
            color='red',
            outline_color='transparent',
            index_mapper=LinearMapper(range=data_view.index_range),
            value_mapper=LinearMapper(range=data_view.value_range),
        )
        line_renderer = LinePlot(
            index=scatter_renderer.index,
            value=ArrayDataSource(half_cosine),
            color='blue',
            index_mapper=LinearMapper(range=data_view.index_range),
            value_mapper=LinearMapper(range=data_view.value_range),
        )
        # Register the data sources so the ranges auto-fit both renderers.
        data_view.index_range.sources.append(scatter_renderer.index)
        data_view.value_range.sources.append(scatter_renderer.value)
        data_view.value_range.sources.append(line_renderer.value)
        data_view.add(scatter_renderer)
        data_view.add(line_renderer)
        data_view.tools.append(PanTool(data_view))
        data_view.overlays.append(ZoomTool(data_view))
        return data_view
import dataclasses


# NOTE(review): the @dataclasses.dataclass decorator was stripped in the
# dump; restored here based on the annotated-field-with-default pattern —
# confirm against the original source.
@dataclasses.dataclass
class MPOConfig():
    """Configuration options for an MPO agent."""
    # Replay buffer.
    min_replay_size: int = 1000
    max_replay_size: int = 1000000
    replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
    prefetch_size: Optional[int] = None
    samples_per_insert: float = 256.0
    samples_per_insert_tolerance_rate: float = 0.1
    # Learning.
    discount: float = 0.99
    batch_size: int = 256
    num_samples: int = 20
    n_step: int = 5
    clipping: bool = True
    # These three were unannotated in the original; without annotations a
    # dataclass silently treats them as class-level constants instead of
    # configurable fields, so the annotations are added for consistency.
    policy_learning_rate: float = 0.0001
    critic_learning_rate: float = 0.0001
    dual_learning_rate: float = 0.01
    # Target-network update cadence.
    target_policy_update_period: int = 100
    target_critic_update_period: int = 100
# NOTE(review): `.django_db` in the original was a stripped decorator,
# reconstructed as @pytest.mark.django_db.
@pytest.mark.django_db
def test_admin_readonly_fields(rf: RequestFactory) -> None:
    """`prefix` is always read-only; once a key is revoked, its identifying
    fields become read-only as well."""
    request = build_admin_request(rf)
    admin = APIKeyModelAdmin(APIKey, site)
    # Add form (no object yet): only the prefix is locked.
    assert admin.get_readonly_fields(request) == ('prefix',)
    api_key = APIKey(name='test')
    assert admin.get_readonly_fields(request, obj=api_key) == ('prefix',)
    # A revoked key must not be editable anymore.
    api_key = APIKey(name='test', revoked=True)
    assert admin.get_readonly_fields(request, obj=api_key) == ('prefix', 'name', 'revoked', 'expiry_date')
# NOTE(review): the bare tuple in the original was a stripped decorator,
# reconstructed as @pytest.mark.skipif.
@pytest.mark.skipif((sys.version_info[0:2] < (3, 8)), reason='zip.Path available in python 3.8+')
def test_importlib_resource_load_zip_path() -> None:
    """The importlib-resources config source can read a config that lives
    inside a zip archive via zipfile.Path."""
    config_source = ImportlibResourcesConfigSource(provider='foo', path='pkg://bar')
    conf = config_source._read_config(zipfile.Path('hydra/test_utils/configs/conf.zip', 'config.yaml'))
    assert (conf.config == {'foo': 'bar'})
    assert (conf.header == {'package': None})
# NOTE(review): the bare expression below looks like a stripped decorator —
# most likely a skip-unless-PySCF-is-installed marker such as
# @using('pyscf') — confirm against the original test module.
('pyscf')
def test_layer_calc(pyscf_acetaldehyd_getter):
    """Each per-layer ONIOM calculator must reproduce the reference results:
    layer 0 matches a plain low-level calculation on the whole system, the
    full set matches the composite ONIOM energy/forces/Hessian."""
    geom = pyscf_acetaldehyd_getter()
    calc = geom.calculator
    real_calc = PySCF(basis='sto3g')
    args = (geom.atoms, geom.cart_coords)
    # Reference energies: low-level on the full system, and the ONIOM total.
    real_low_en = real_calc.get_energy(*args)['energy']
    oniom_en = geom.energy
    ref_energies = (real_low_en, oniom_en)
    # Reference forces.
    real_low_forces = real_calc.get_forces(*args)['forces']
    oniom_forces = geom.cart_forces
    ref_forces = (real_low_forces, oniom_forces)
    # Reference Hessians.
    real_low_hessian = real_calc.get_hessian(*args)['hessian']
    oniom_hessian = geom.cart_hessian
    ref_hessians = (real_low_hessian, oniom_hessian)
    # Evaluate every layer calculator and collect its results.
    energies = list()
    all_forces = list()
    all_hessians = list()
    for (i, _) in enumerate(calc.layers):
        lcalc = calc.get_layer_calc(i)
        energy = lcalc.get_energy(*args)['energy']
        energies.append(energy)
        forces = lcalc.get_forces(*args)['forces']
        all_forces.append(forces)
        hessian = lcalc.get_hessian(*args)['hessian']
        all_hessians.append(hessian)
    np.testing.assert_allclose(energies, ref_energies)
    np.testing.assert_allclose(all_forces, ref_forces, atol=2e-07)
    np.testing.assert_allclose(all_hessians, ref_hessians, atol=2e-07)
class OptionPlotoptionsSunburstSonificationDefaultinstrumentoptionsMappingVolume(Options):
    """Highcharts volume-mapping options for sunburst sonification.

    NOTE: the original dump had duplicate getter/setter `def`s with the
    `@property` decorators stripped, so each setter shadowed its getter;
    the property pairs are restored here.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the bare tuple in the original was a stripped decorator,
# reconstructed as @mark.parametrize (the bare `param(...)` calls match the
# `from pytest import mark, param` import style).
@mark.parametrize('config_name,overrides,expected', [param(None, [], [], id='none'), param(None, ['+group1=file1'], [ResultDefault(config_path='group1/file1', package='group1', parent='_dummy_empty_config_')], id='none+group1=file1')])
def test_with_none_primary(config_name: str, overrides: List[str], expected: List[ResultDefault]) -> None:
    """Defaults-list construction must work without a primary config."""
    _test_defaults_list_impl(config_name=config_name, overrides=overrides, expected=expected)
class HTMLInline(ContentEditorInline):
    """Content-editor plugin for raw HTML snippets, with a warning shown in
    the admin about the author's responsibility for well-formed markup."""

    formfield_overrides = {models.TextField: {'widget': forms.Textarea(attrs={'rows': 3, 'cols': 40, 'class': 'vLargeTextField'})}}
    button = '<span class="material-icons">code</span>'

    def get_fieldsets(self, request, obj=None):
        fieldsets = super().get_fieldsets(request, obj=obj)
        # Prepend a prominent disclaimer to the first fieldset.
        warning = format_html('<strong><big>{}</big></strong>', _("Please note that the HTML must be well formed. It's your responsibility to ensure that nothing breaks now or in the future when using this plugin."))
        fieldsets[0][1]['description'] = warning
        return fieldsets
def tokenizer_senter_score(examples, **kwargs):
    """Score tokenization plus sentence-boundary spans, dropping the
    per-type sentence breakdown from the result."""

    def has_sents(doc):
        return doc.has_annotation('SENT_START')

    scores = Scorer.score_tokenization(examples)
    sent_scores = Scorer.score_spans(examples, 'sents', has_annotation=has_sents, **kwargs)
    scores.update(sent_scores)
    # Per-type breakdown is meaningless for sentence spans.
    scores.pop('sents_per_type')
    return scores
class Territory():
    """A set of owned hexes grown from a starting (`main`) hex on a hex grid.

    NOTE: the original dump had the accessor decorators stripped; `@property`
    is restored for the computed attributes (`avg_temp` divides by
    `self.size`, which only works if `size` is a property, and the siblings
    follow the same pattern).
    """

    def __init__(self, grid, main, id_num, color):
        self.grid = grid
        self.id = id_num
        self.color = color
        self.main = main
        # Back-reference so the hex knows its owner.
        main.territory = self
        self.last_added = [main]
        self.members = [main]
        self.groups = []
        self.db_instance = None

    @property
    def frontier(self):
        """Unowned hexes adjacent to the most recently added members."""
        frontier = []
        for m in self.last_added:
            frontier.extend([h for h in m.surrounding if (h.is_owned is False)])
        return frontier

    @property
    def landlocked(self):
        """True when no member hex touches water."""
        for h in self.members:
            if any([h for h in h.surrounding if h.is_water]):
                return False
        return True

    @property
    def neighbors(self):
        """Set of distinct foreign territories adjacent to any member."""
        terr = set()
        for h in self.members:
            terr.update(set([m.territory for m in h.surrounding if (m.is_land and (m.territory is not None) and (m.territory.id != self.id))]))
        return terr

    @property
    def avg_temp(self):
        """Mean temperature over all members, rounded to 2 decimals."""
        return round((sum([h.temperature for h in self.members]) / self.size), 2)

    @property
    def avg_moisture(self):
        """Mean moisture over all members, rounded to 2 decimals."""
        return round((sum([h.moisture for h in self.members]) / self.size), 2)

    @property
    def biomes(self):
        """Biome histogram over members, most common first."""
        b = dict()
        for h in self.members:
            if (h.biome.name in b):
                b[h.biome.name]['count'] += 1
            else:
                b[h.biome.name] = dict(biome=h.biome, count=1)
        return sorted(b.values(), key=(lambda k: k['count']), reverse=True)

    @property
    def size(self):
        """Number of member hexes."""
        return len(self.members)

    def __eq__(self, other):
        # Identity is the territory id only; color is ignored for equality
        # (but participates in the hash key below, as in the original).
        return (self.id == other.id)

    def __key(self):
        return (self.id, self.color)

    def __hash__(self):
        return hash(self.__key())

    def __repr__(self):
        return '<Territory ID: {}>'.format(self.id)

    def find_groups(self):
        """Partition members into contiguous groups and store, per group,
        its size and the rounded centroid in `self.groups`."""

        def find_unmarked():
            # Pick a random still-unmarked member as the next flood seed.
            while True:
                found = random.choice(self.members)
                if (found.marked is False):
                    return found

        def flood(start, group):
            # Iterative DFS flood fill; the original recursive version could
            # hit the interpreter recursion limit on large territories.
            stack = [start]
            while stack:
                sh = stack.pop()
                if sh.marked:
                    continue
                sh.marked = True
                group.append(sh)
                stack.extend([s for s in sh.map_surrounding if (s.is_land and (s.territory is not None) and (s.territory == self) and (s.marked is False))])

        def num_marked():
            return len([h for h in self.members if h.marked])

        groups = []
        while (num_marked() < len(self.members)):
            group = []
            flood(find_unmarked(), group)
            groups.append(group)
        result = []
        for g in groups:
            xs = [h.x for h in g]
            ys = [h.y for h in g]
            result.append(dict(size=len(g), x=round(sum(xs) / len(xs)), y=round(sum(ys) / len(ys))))
        self.groups = result
class OptionSeriesErrorbarSonificationContexttracksMappingLowpass(Options):
    """Low-pass filter mapping options for an errorbar sonification context
    track.

    NOTE(review): `@property` restored to match the stripped-decorator
    pattern seen throughout this dump (sibling Options classes have
    getter/setter pairs that only make sense as properties) — confirm.
    """

    @property
    def frequency(self) -> 'OptionSeriesErrorbarSonificationContexttracksMappingLowpassFrequency':
        """Sub-options controlling the filter's cutoff frequency."""
        return self._config_sub_data('frequency', OptionSeriesErrorbarSonificationContexttracksMappingLowpassFrequency)

    @property
    def resonance(self) -> 'OptionSeriesErrorbarSonificationContexttracksMappingLowpassResonance':
        """Sub-options controlling the filter's resonance."""
        return self._config_sub_data('resonance', OptionSeriesErrorbarSonificationContexttracksMappingLowpassResonance)
def test_write_messages():
    """Messages written with Ros2Writer must round-trip through
    read_ros2_messages with topic, schema, payload and timestamps intact."""
    buffer = BytesIO()
    writer = Ros2Writer(output=buffer)
    schema = writer.register_msgdef('test_msgs/TestData', 'string a\nint32 b')
    # Write ten messages whose fields all encode their own index.
    for i in range(10):
        writer.write_message(topic='/test', schema=schema, message={'a': f'string message {i}', 'b': i}, log_time=i, publish_time=i, sequence=i)
    writer.finish()
    # Rewind and read everything back.
    buffer.seek(0)
    for (index, msg) in enumerate(read_ros2_messages(buffer)):
        assert (msg.channel.topic == '/test')
        assert (msg.schema.name == 'test_msgs/TestData')
        assert (msg.decoded_message.a == f'string message {index}')
        assert (msg.decoded_message.b == index)
        assert (msg.message.log_time == index)
        assert (msg.message.publish_time == index)
        assert (msg.message.sequence == index)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.