code stringlengths 281 23.7M |
|---|
class SpeechmaticsApi(ProviderInterface, AudioInterface):
    """Speechmatics batch speech-to-text provider (asynchronous jobs API)."""
    provider_name = 'speechmatics'

    def __init__(self, api_keys: Dict = {}) -> None:
        self.api_settings = load_provider(ProviderDataEnum.KEY, self.provider_name, api_keys=api_keys)
        self.key = self.api_settings['speechmatics_key']
        # Fixed: this URL literal was truncated/unterminated. Speechmatics
        # batch jobs endpoint (v2).
        self.base_url = 'https://asr.api.speechmatics.com/v2/jobs'
        self.headers = {'Authorization': f'Bearer {self.key}'}

    def audio__speech_to_text_async__launch_job(self, file: str, language: str, speakers: int, profanity_filter: bool, vocabulary: Optional[List[str]], audio_attributes: tuple, model: str, file_url: str = '', provider_params=dict()) -> AsyncLaunchJobResponseType:
        """Submit an audio file for transcription and return the provider job id.

        Raises ProviderException when the API does not answer 201 Created.
        """
        config = {'language': language, 'diarization': 'speaker', 'operating_point': model}
        if vocabulary:
            config['additional_vocab'] = [{'content': word} for word in vocabulary]
        payload = {'config': json.dumps({'type': 'transcription', 'transcription_config': config}), **provider_params}
        # Fixed: the file handle used to leak; close it once the upload is done.
        with open(file, 'rb') as file_:
            response = requests.post(url=self.base_url, headers=self.headers, data=payload, files={'data_file': file_})
        if response.status_code != 201:
            raise ProviderException(response.content, response.status_code)
        return AsyncLaunchJobResponseType(provider_job_id=response.json()['id'])

    def audio__speech_to_text_async__get_job_result(self, provider_job_id: str) -> AsyncBaseResponseType[SpeechToTextAsyncDataClass]:
        """Poll a transcription job and build the standardized response.

        Returns a pending response while the job is 'running'; raises
        AsyncJobException for unknown/expired job ids and ProviderException
        for any other API error.
        """
        response = requests.get(f'{self.base_url}/{provider_job_id}', headers=self.headers)
        original_response = response.json()
        if response.status_code != 200:
            if original_response.get('details') == 'path not found':
                # The job id is unknown to the provider (expired or bogus).
                raise AsyncJobException(reason=AsyncJobExceptionReason.DEPRECATED_JOB_ID, code=response.status_code)
            raise ProviderException(message=original_response, code=response.status_code)
        job_details = original_response['job']
        errors = job_details.get('errors')
        if errors:
            raise ProviderException(errors, code=response.status_code)
        status = job_details['status']
        if status == 'running':
            return AsyncPendingResponseType[SpeechToTextAsyncDataClass](provider_job_id=provider_job_id)
        elif status == 'done':
            response = requests.get(f'{self.base_url}/{provider_job_id}/transcript', headers=self.headers)
            original_response = response.json()
            if response.status_code != 200:
                raise ProviderException(original_response.get('errors'), code=response.status_code)
            diarization_entries = []
            # Fixed: speakers was a set() indexed via list(speakers).index(...),
            # which numbers speakers in arbitrary set order. Use an ordered
            # list of first-seen speakers so numbering is deterministic.
            speakers: List[str] = []
            text = ''
            for entry in original_response.get('results'):
                best = entry['alternatives'][0]
                text = text + ' ' + best['content']
                speaker = best.get('speaker') or None
                if speaker and speaker not in speakers:
                    speakers.append(speaker)
                diarization_entries.append(SpeechDiarizationEntry(
                    segment=best['content'],
                    start_time=str(entry['start_time']),
                    end_time=str(entry['end_time']),
                    confidence=best['confidence'],
                    # 1-based speaker index; 0 means no speaker attributed.
                    speaker=(speakers.index(speaker) + 1) if speaker else 0,
                ))
            diarization = SpeechDiarization(total_speakers=len(speakers), entries=diarization_entries)
            return AsyncResponseType(original_response=original_response, standardized_response=SpeechToTextAsyncDataClass(text=text, diarization=diarization), provider_job_id=provider_job_id)
        else:
            raise ProviderException('Unexpected job failed')
class VmStatusChanged(Event):
    """Event emitted whenever a VM's status changes."""
    _name_ = 'vm_status_changed'

    def __init__(self, task_id, vm):
        # Resolve the human-readable status once; it is reused for both the
        # display and detail fields.
        shown = vm.status_display(pending=False)
        super(VmStatusChanged, self).__init__(
            task_id,
            vm_hostname=vm.hostname,
            alias=vm.alias,
            status=vm.status,
            status_display=shown,
            detail=shown,
            status_change=str(vm.status_change),
            define_changed=vm.json_changed(),
            locked=vm.locked,
        )
def main():
    """Initialise the SDRAM over the DFI interface and start the command interface.

    Bitslip/delay values are taken from the command line when both are given;
    otherwise they are scanned for automatically.
    """
    parser = argparse.ArgumentParser(description='Script to test correct DDR behaviour.')
    parser.add_argument('--bitslip', default=None, help='Defines a bitslip value.')
    parser.add_argument('--delay', default=None, help='Defines a delay value.')
    args = parser.parse_args()
    wb = RemoteClient(debug=False)
    wb.open()
    # Take software control of the DFI before replaying the init sequence.
    wb.regs.sdram_dfii_control.write(0)
    for (i, (comment, a, ba, cmd, delay)) in enumerate(init_sequence):
        print(comment)
        wb.regs.sdram_dfii_pi0_address.write(a)
        wb.regs.sdram_dfii_pi0_baddress.write(ba)
        if (i < 2):
            # First two entries program the control register directly; the
            # rest are issued as PI0 commands.
            wb.regs.sdram_dfii_control.write(cmd)
        else:
            wb.regs.sdram_dfii_pi0_command.write(cmd)
            wb.regs.sdram_dfii_pi0_command_issue.write(1)
    # Hand control back to the hardware controller.
    wb.regs.sdram_dfii_control.write(dfii_control_sel)
    if ((args.bitslip is None) or (args.delay is None)):
        # Scan for working values when not fully specified on the CLI.
        (bitslip, delay) = find_bitslips_delays(wb)
    else:
        bitslip = int(args.bitslip)
        delay = int(args.delay)
    set_bitslip_delay(wb, bitslip, delay)
    start_command_interface(wb)
    wb.close()
def _check_output_is_not_input(input_file, output_file):
i = getattr(input_file, 'name', input_file)
o = getattr(output_file, 'name', output_file)
check_f = (_samefile if (isinstance(i, str) and isinstance(o, str)) else operator.eq)
if check_f(i, o):
raise esptool.FatalError('The input "{}" and output "{}" should not be the same!'.format(i, o)) |
class HardforkIndex(Index):
    """Sphinx domain index grouping documented modules by Ethereum hardfork.

    Builds one index section per fork containing its specification modules
    and, where a pre-computed diff file exists, entries linking to
    fork-to-fork comparison pages.
    """
    name = 'hardforks'
    localname = 'Hard Fork Index'
    shortname = 'Hard Fork'

    def generate(self, doc_names: Optional[Iterable[str]]=None) -> Tuple[(List[Tuple[(str, List[IndexEntry])]], bool)]:
        """Build the (section name, entries) list for the index.

        Returns the sections plus True for the 'collapse' flag.
        """
        forks = Hardfork.discover()
        # Per-fork accumulator of module entries and comparison entries.
        content: Dict[(Hardfork, _ForkEntry)] = defaultdict((lambda : _ForkEntry(modules=[], comparisons=[])))
        # (older fork, newer fork) -> set of (relative doc name, module name).
        comparisons = defaultdict(set)
        for (name, _, kind, doc_name, _, _) in self.domain.get_objects():
            if (kind != 'module'):
                continue
            fork_index = None
            fork = None
            # Find which fork's package this module belongs to.
            for (index, guess) in enumerate(forks):
                if name.startswith((guess.name + '.')):
                    fork = guess
                    fork_index = index
                    break
            if ((fork is None) or (fork_index is None)):
                continue
            content[fork].modules.append(IndexEntry(name, 2, doc_name, '', '', '', ''))
            base = f'{BASE_FORKS}/{fork.short_name}'
            rel_doc_name = os.path.relpath(doc_name, base)
            # Strip the fork package prefix (and its trailing dot).
            rel_name = _remove_prefix(name, fork.name)[1:]
            next_fork: Optional[Hardfork]
            try:
                next_fork = forks[(fork_index + 1)]
            except IndexError:
                next_fork = None
            if next_fork:
                comparisons[(fork, next_fork)].add((rel_doc_name, rel_name))
            prev_fork = None
            if (fork_index > 0):
                prev_fork = forks[(fork_index - 1)]
            if prev_fork:
                comparisons[(prev_fork, fork)].add((rel_doc_name, rel_name))
        for ((prev_fork, fork), modules) in comparisons.items():
            for (module, rel_name) in modules:
                doc_name = f'diffs/{prev_fork.short_name}_{fork.short_name}/{module}'
                # Only link comparisons whose pickled diff was generated.
                diff_file_path = os.path.join(os.getcwd(), 'doc', (doc_name + '.pickle64'))
                if (not os.path.isfile(diff_file_path)):
                    continue
                content[fork].comparisons.append(IndexEntry(rel_name, 2, doc_name, '', '', '', ''))
        # Order forks chronologically by their activation criteria.
        entries = sorted(content.items(), key=(lambda i: i[0].criteria))
        result = []
        for (fork, entry) in entries:
            items: List[IndexEntry] = []
            result.append((fork.name, items))
            items.append(IndexEntry('Specification', 1, '', '', '', '', ''))
            items.extend(entry.modules)
            if (not entry.comparisons):
                continue
            items.append(IndexEntry('Changes', 1, '', '', '', '', ''))
            items.extend(sorted(entry.comparisons, key=(lambda i: i.name)))
        return (result, True)
# Fixed: this parametrize line had lost its '@pytest.mark.parametrize' prefix,
# leaving a bare no-op tuple, so the test never received 'other_flag'.
@pytest.mark.parametrize('other_flag', [None, '--run', '--multirun', '--info', '--shell-completion', '--hydra-help'])
def test_resolve_flag_errmsg(tmpdir: Path, other_flag: Optional[str]) -> None:
    """--resolve must be rejected when combined with anything but --cfg/--help."""
    cmd = [
        'examples/tutorials/basic/your_first_hydra_app/3_using_config/my_app.py',
        'hydra.run.dir=' + str(tmpdir),
        'hydra.job.chdir=True',
        '--resolve',
    ]
    if other_flag is not None:
        cmd.append(other_flag)
    err = run_with_error(cmd)
    assert normalize_newlines(err).endswith('ValueError: The --resolve flag can only be used in conjunction with --cfg or --help')
def test_airy_single_layer_array(basement, water_array):
    """Airy root with one water layer and array densities matches reference
    values, and density_water is propagated into the output attributes."""
    thickness_water, density_water = water_array
    moho = isostatic_moho_airy(
        basement,
        layers={'water': (thickness_water, density_water)},
        density_crust=np.array([1, 2, 1, 1, 2, 1], dtype=float),
        density_mantle=np.array([3, 3, 3, 3, 3, 3], dtype=float),
        reference_depth=0,
    )
    expected = np.array([-0.5, -1.6, 0.0, 0.5, 4.0, 1.5])
    npt.assert_allclose(moho, expected, rtol=1e-10, atol=0)
    if isinstance(moho, xr.DataArray):
        assert (moho.attrs['density_water'] == density_water).all()
class GaugeFlowTablePrometheusPoller(GaugeFlowTablePoller):
    """Exports OpenFlow flow-table statistics as Prometheus gauges."""

    def _update(self, rcv_time, msg):
        """Parse an OFPFlowStatsReply and update per-table Prometheus metrics.

        Newly seen tag keys force a re-registration of the table's metric
        variables; tags registered earlier but absent from this sample are
        padded with '' so .labels() always receives the full label set.
        """
        jsondict = msg.to_jsondict()
        for stats_reply in jsondict['OFPFlowStatsReply']['body']:
            stats = stats_reply['OFPFlowStats']
            for (var, tags, count) in self._parse_flow_stats(stats):
                table_id = int(tags['table_id'])
                table_name = self.dp.table_by_id(table_id).name
                table_tags = self.prom_client.table_tags[table_name]
                tags_keys = set(tags.keys())
                if (tags_keys != table_tags):
                    unreg_tags = (tags_keys - table_tags)
                    if unreg_tags:
                        # Prometheus label sets are fixed per metric, so new
                        # labels require re-registering the table's variables.
                        table_tags.update(unreg_tags)
                        self.prom_client.reregister_flow_vars(table_name, table_tags)
                        self.logger.info(('Adding tags %s to %s for table %s' % (unreg_tags, table_tags, table_name)))
                    # Pad labels missing from this sample.
                    missing_tags = (table_tags - tags_keys)
                    for tag in missing_tags:
                        tags[tag] = ''
                table_prom_var = PROM_PREFIX_DELIM.join((var, table_name))
                try:
                    self.prom_client.metrics[table_prom_var].labels(**tags).set(count)
                except ValueError:
                    # Label mismatch despite the reconciliation above.
                    self.logger.error(('labels %s versus %s incorrect on %s' % (tags, table_tags, table_prom_var)))
def test_imputation_of_numerical_vars_cast_as_object_and_returned_as_numerical(df_na):
    """A numeric column cast to object is imputed by mode and comes back numeric."""
    data = df_na.copy()
    data['Marks'] = data['Marks'].astype('O')
    transformer = CategoricalImputer(imputation_method='frequent', variables=['City', 'Studies', 'Marks'])
    result = transformer.fit_transform(data)
    # Build the expected frame by filling each column with its mode.
    expected = data.copy()
    for column, mode_value in (('Marks', 0.8), ('City', 'London'), ('Studies', 'Bachelor')):
        expected[column] = expected[column].fillna(mode_value)
    assert transformer.variables == ['City', 'Studies', 'Marks']
    assert transformer.variables_ == ['City', 'Studies', 'Marks']
    assert transformer.imputer_dict_ == {'Studies': 'Bachelor', 'City': 'London', 'Marks': 0.8}
    assert result['Marks'].dtype == 'float'
    pd.testing.assert_frame_equal(result, expected)
def main():
    """Ansible entry point for the FortiOS 'log gui-display' configuration.

    Builds the argument spec from the versioned schema, talks to the device
    over the httpapi connection, and exits with changed/diff information.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # log_gui_display is a singleton resource: no member key.
    mkeyname = None
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'log_gui_display': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Merge the schema-derived options into the module's argument spec.
    for attribute_name in module_spec['options']:
        fields['log_gui_display']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['log_gui_display']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Compared against the device below to warn on version mismatches.
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'log_gui_display')
        (is_error, has_changed, result, diff) = fortios_log(module.params, fos)
    else:
        # No persistent connection available.
        module.fail_json(**FAIL_SOCKET_MSG)
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
def enforce_requirements():
    """Abort startup when installed packages do not satisfy requirements.txt.

    Skipped when the requirements file is absent or when a bundled packages
    directory exists (dependencies ship with the application).
    """
    requirement_path = os.path.join(startup.base_dir, 'contrib', 'requirements', 'requirements.txt')
    if not os.path.exists(requirement_path) or os.path.exists(startup.packages_dir):
        return
    import pkg_resources
    from pkg_resources import DistributionNotFound, VersionConflict
    with open(requirement_path, 'r') as f:
        try:
            pkg_resources.require(f.readlines())
        except VersionConflict as e:
            sys.exit(f"Dependency version conflict, got '{e.args[0]}', expected '{e.args[1]}'")
        except DistributionNotFound as e:
            sys.exit(str(e))
def click_on_item(editor, item_idx, in_used=False):
    """Simulate a user click on row *item_idx* of a list-editor list.

    Targets the 'used' list when in_used is True, otherwise the 'unused'
    list. Handles both the wx and Qt backends; any other toolkit causes the
    calling test to be skipped.
    """
    unused_list = editor._unused
    used_list = editor._used
    if is_wx():
        import wx
        # Clear any existing selection in both lists first.
        for i in range(unused_list.GetCount()):
            unused_list.Deselect(i)
        for i in range(used_list.GetCount()):
            used_list.Deselect(i)
        list_with_selection = (used_list if in_used else unused_list)
        list_with_selection.SetSelection(item_idx)
        # wx requires an explicit EVT_LISTBOX event posted to the control.
        event = wx.CommandEvent(wx.EVT_LISTBOX.typeId, list_with_selection.GetId())
        wx.PostEvent(editor.control, event)
    elif is_qt():
        # Qt: set every item's selection state, then emit itemClicked on the
        # targeted list so the editor reacts as if clicked.
        for i in range(unused_list.count()):
            status = ((not in_used) and (item_idx == i))
            unused_list.item(i).setSelected(status)
        for i in range(used_list.count()):
            status = (in_used and (item_idx == i))
            used_list.item(i).setSelected(status)
        if in_used:
            used_list.itemClicked.emit(used_list.item(item_idx))
        else:
            unused_list.itemClicked.emit(unused_list.item(item_idx))
    else:
        raise unittest.SkipTest('Test not implemented for this toolkit')
def request_counter(f):
    """Decorator that increments the 'request_count' metric for *f* per call.

    Fixed: the body contained a stray bare ``(f)`` expression — apparently a
    stripped ``@functools.wraps(f)`` decorator — so the wrapper lost the
    wrapped function's name/docstring. Restored via functools.wraps.
    """
    import functools  # local import keeps this fix self-contained

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Bump the counter in its own transaction before delegating.
        with emissionsapi.db.get_session() as session:
            with session.begin():
                emissionsapi.db.Metrics.update(session, 'request_count', f.__name__, (emissionsapi.db.Metrics.value + 1), 1)
        return f(*args, **kwargs)
    return wrapper
class TaskExecutionMeta(Base):
    """ORM model for one execution attempt of a task within a workflow run."""
    __tablename__ = 'task_execution'

    id = Column(BigInteger, autoincrement=True, primary_key=True)
    # Owning workflow run.
    workflow_execution_id = Column(Integer, ForeignKey('workflow_execution.id'))
    task_name = Column(String(256))
    # Ordinal of this execution among runs of the same task in the run.
    sequence_number = Column(Integer)
    try_number = Column(Integer, default=0)
    begin_date = Column(DateTime)
    end_date = Column(DateTime)
    status = Column(String(256))
    workflow_execution = relationship('WorkflowExecutionMeta')
    # A task may only appear once per (workflow run, name, sequence number).
    __table_args__ = (UniqueConstraint('workflow_execution_id', 'task_name', 'sequence_number'),)

    def __init__(self, workflow_execution_id, task_name, sequence_number=1, try_number=1, begin_date=None, end_date=None, status=TaskStatus.INIT.value, uuid=None):
        """Create a task-execution row; *uuid*, when given, becomes the primary key."""
        self.workflow_execution_id = workflow_execution_id
        self.task_name = task_name
        self.sequence_number = sequence_number
        self.try_number = try_number
        self.begin_date = begin_date
        self.end_date = end_date
        self.status = status
        # NOTE(review): the parameter is named 'uuid' but is stored in the
        # BigInteger primary key — confirm callers pass a numeric id.
        self.id = uuid
def phase_cmd_description(addressbits, bankbits, nranks):
    """Return the DFI phase command signal layout as (name, width, direction) triples.

    All signals flow master-to-slave; per-rank signals (cs_n, cke, odt) are
    nranks wide.
    """
    widths = [
        ('address', addressbits),
        ('bank', bankbits),
        ('cas_n', 1),
        ('cs_n', nranks),
        ('ras_n', 1),
        ('we_n', 1),
        ('cke', nranks),
        ('odt', nranks),
        ('reset_n', 1),
        ('act_n', 1),
    ]
    return [(name, width, DIR_M_TO_S) for name, width in widths]
def lazy_import():
    """Import the fastly logging-syslog model dependencies on first use.

    Deferring these imports avoids circular-import problems at module load
    time; the names are published via globals() so the surrounding module's
    type references resolve once this has run.
    """
    from fastly.model.logging_address_and_port import LoggingAddressAndPort
    from fastly.model.logging_common_response import LoggingCommonResponse
    from fastly.model.logging_message_type import LoggingMessageType
    from fastly.model.logging_syslog_additional import LoggingSyslogAdditional
    from fastly.model.logging_tls_common import LoggingTlsCommon
    from fastly.model.logging_use_tls import LoggingUseTls
    from fastly.model.service_id_and_version_string import ServiceIdAndVersionString
    from fastly.model.timestamps import Timestamps
    globals()['LoggingAddressAndPort'] = LoggingAddressAndPort
    globals()['LoggingCommonResponse'] = LoggingCommonResponse
    globals()['LoggingMessageType'] = LoggingMessageType
    globals()['LoggingSyslogAdditional'] = LoggingSyslogAdditional
    globals()['LoggingTlsCommon'] = LoggingTlsCommon
    globals()['LoggingUseTls'] = LoggingUseTls
    globals()['ServiceIdAndVersionString'] = ServiceIdAndVersionString
    globals()['Timestamps'] = Timestamps
def downgrade():
    """Revert the 'resourcetypes' enum to its pre-'privacy_declaration' members.

    Postgres cannot drop a value from an enum in place: rename the old type,
    create a fresh enum without 'privacy_declaration', delete rows still
    using the removed value, convert the columns via text to the new type,
    then drop the renamed old type.
    """
    op.execute('alter type resourcetypes rename to resourcetypesold')
    op.execute("create type resourcetypes as enum ('system', 'data_use', 'data_category', 'data_subject');")
    op.execute("delete from plus_custom_field where resource_type = 'privacy_declaration'")
    op.execute("delete from plus_custom_field_definition where resource_type = 'privacy_declaration'")
    op.execute('alter table plus_custom_field_definition alter column resource_type type resourcetypes using resource_type::text::resourcetypes')
    op.execute('alter table plus_custom_field alter column resource_type type resourcetypes using resource_type::text::resourcetypes')
    op.execute('drop type resourcetypesold;')
def main():
    """Ansible entry point for the FortiOS 'firewall vipgrp6' resource.

    Builds the argument spec from the versioned schema, talks to the device
    over the httpapi connection, and exits with changed/diff information.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # vipgrp6 entries are keyed by their 'name' attribute.
    mkeyname = 'name'
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']}, 'firewall_vipgrp6': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Merge the schema-derived options into the module's argument spec; the
    # member key becomes a required option.
    for attribute_name in module_spec['options']:
        fields['firewall_vipgrp6']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['firewall_vipgrp6']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Compared against the device below to warn on version mismatches.
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'firewall_vipgrp6')
        (is_error, has_changed, result, diff) = fortios_firewall(module.params, fos, module.check_mode)
    else:
        # No persistent connection available.
        module.fail_json(**FAIL_SOCKET_MSG)
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
def filter_credential_store_domain_controller_data(json):
    """Keep only the known, non-None credential-store domain-controller options."""
    option_list = ['domain_name', 'hostname', 'ip', 'ip6', 'password', 'port', 'server_name', 'username']
    json = remove_invalid_fields(json)
    return {
        key: json[key]
        for key in option_list
        if key in json and json[key] is not None
    }
def _read_module(mod, doc_dict):
    """Collect docstrings for *mod*, its classes, and their methods into doc_dict.

    Keys are fully-qualified dotted names. Uses exec() on names discovered by
    _get_doc_and_function to resolve attribute objects; inputs are trusted
    internal module/class names, not user data.
    """
    (doc_dict[mod.__name__], classes) = _get_doc_and_function(mod)
    # Make the project root importable so 'import expyriment' resolves when
    # this script is run from the documentation tools directory.
    p = os.path.abspath(os.path.join(os.path.split(sys.argv[0])[0], '..'))
    sys.path.insert(0, p)
    import expyriment
    namespace = locals()
    for cl in classes:
        cl = '{0}.{1}'.format(mod.__name__, cl)
        exec(('_x =' + cl), namespace)
        (doc_dict[cl], functions) = _get_doc_and_function(namespace['_x'])
        for fnc in functions:
            fnc = '{0}.{1}'.format(cl, fnc)
            exec(('_y =' + fnc), namespace)
            (doc_dict[fnc], _tmp) = _get_doc_and_function(namespace['_y'])
def test_validate_int_in_string_null_raises():
    """An int in a ['string', 'null'] field must raise with the right path."""
    payload = [{'str_null': 11, 'str': 'str', 'integ_null': 21, 'integ': 21}]
    with pytest.raises(ValidationError) as excinfo:
        validation_raise(schema, *payload)
    for failure in excinfo.value.errors:
        # Each reported error points at the int stored in the string/null slot.
        assert failure.schema in ['string', 'null']
        assert failure.field == 'namespace.missingerror.str_null'
class CreateStartWorkflowRequestTest(TestCase):
    """Cron schedules must survive regardless of decorator ordering."""

    def test_cron_first(self):
        """Cron decorator applied first still yields the cron schedule."""
        client = MagicMock()
        request: StartWorkflowExecutionRequest = create_start_workflow_request(client, cron_first._workflow_method, [])
        self.assertEqual('*/2 * * * *', request.cron_schedule)

    def test_cron_last(self):
        """Cron decorator applied last also yields the cron schedule."""
        client = MagicMock()
        request: StartWorkflowExecutionRequest = create_start_workflow_request(client, cron_last._workflow_method, [])
        self.assertEqual('*/2 * * * *', request.cron_schedule)
class OptionSeriesBoxplotSonificationTracksMappingTremolo(Options):
    """Config wrapper for series.boxplot.sonification.tracks.mapping.tremolo."""

    def depth(self) -> 'OptionSeriesBoxplotSonificationTracksMappingTremoloDepth':
        """Sub-configuration object for the tremolo depth mapping."""
        return self._config_sub_data('depth', OptionSeriesBoxplotSonificationTracksMappingTremoloDepth)

    def speed(self) -> 'OptionSeriesBoxplotSonificationTracksMappingTremoloSpeed':
        """Sub-configuration object for the tremolo speed mapping."""
        return self._config_sub_data('speed', OptionSeriesBoxplotSonificationTracksMappingTremoloSpeed)
def test_medium_smaller_than_one_positive_sigma():
    """CustomMedium.from_nk must reject n < 1 and negative k values."""
    # Refractive index below 1 at a single sample point -> validation error.
    n_data = (1 + np.random.random((Nx, Ny, Nz, 1)))
    n_data[(0, 0, 0, 0)] = 0.5
    n_dataarray = td.ScalarFieldDataArray(n_data, coords=dict(x=X, y=Y, z=Z, f=freqs))
    with pytest.raises(pydantic.ValidationError):
        _ = CustomMedium.from_nk(n_dataarray)
    # Negative extinction coefficient at a single sample point -> validation error.
    n_data = (1 + np.random.random((Nx, Ny, Nz, 1)))
    k_data = np.random.random((Nx, Ny, Nz, 1))
    k_data[(0, 0, 0, 0)] = (- 0.1)
    n_dataarray = td.ScalarFieldDataArray(n_data, coords=dict(x=X, y=Y, z=Z, f=freqs))
    k_dataarray = td.ScalarFieldDataArray(k_data, coords=dict(x=X, y=Y, z=Z, f=freqs))
    with pytest.raises(pydantic.ValidationError):
        _ = CustomMedium.from_nk(n_dataarray, k_dataarray)
class LinearModel():
    """Simple line model y = a*x + b with a biased random initialiser."""

    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.size = 2  # number of free parameters

    # Fixed: this alternate constructor takes 'cls' and calls cls(...), so it
    # was clearly meant to be a classmethod; without the decorator it received
    # an instance as 'cls' and failed.
    @classmethod
    def random(cls):
        """Draw a model with Gaussian parameters biased away from the truth.

        a_true / b_true are module-level ground-truth values.
        """
        a_std = 2.0
        b_std = 2.0
        a_bias = 0.5 * a_std
        b_bias = -0.5 * b_std
        return cls(np.random.normal(a_true + a_bias, a_std),
                   np.random.normal(b_true + b_bias, b_std))

    def eval(self, x):
        """Evaluate the line at x."""
        return self.a * x + self.b
def zalesak_disk(X, t):
    """Evaluate the Zalesak slotted-disk field at point(s) X and time t.

    X is either a non-dict single point (delegated directly) or a dict
    {0: x, 1: y} of 2-D coordinate arrays evaluated point-by-point.
    """
    # NOTE(review): these locals are unused here; the disk geometry appears
    # to live inside zalesak_disk_per_point — confirm and remove if so.
    radius = 0.15
    xc = 0.5
    yc = 0.75
    if (type(X) != dict):
        # NOTE(review): this branch evaluates at t=0 rather than t — confirm
        # whether that is intentional (initial condition only).
        return zalesak_disk_per_point(X, 0)
    else:
        x = X[0]
        y = X[1]
        z = np.zeros(x.shape, 'd')
        # Evaluate the per-point function over the 2-D grid.
        for i in range(len(x)):
            j = 0
            for (xq, yq) in zip(x[i], y[i]):
                XX = {0: xq, 1: yq}
                z[(i, j)] = zalesak_disk_per_point(XX, t)
                j = (j + 1)
        return z
class Solution():
    def largestPerimeter(self, nums: List[int]) -> int:
        """Return the largest perimeter of a triangle formed from three of
        nums' lengths, or 0 if no valid triangle exists.

        Fixed: the previous flattened logic only tested the three largest
        values and never examined smaller triples (e.g. [3, 2, 3, 4] wrongly
        yielded 0). After sorting, scanning consecutive triples from the
        largest downward finds the maximal valid perimeter: for a sorted
        triple (x, y, z) the triangle inequality reduces to x + y > z.
        """
        nums.sort()
        for i in range(len(nums) - 3, -1, -1):
            if nums[i] + nums[i + 1] > nums[i + 2]:
                return nums[i] + nums[i + 1] + nums[i + 2]
        return 0
def run_gtmg_mixed_poisson():
    """Solve a mixed Poisson problem with hybridization + GTMG preconditioning.

    Builds an RTxDG mixed formulation on the finest mesh of a hierarchy,
    solves it matrix-free with a hybridized solver whose trace system is
    preconditioned by GTMG using a P1 coarse space, and returns the L2
    error of the scalar solution against the exact solution.
    """
    m = UnitSquareMesh(10, 10)
    nlevels = 2
    mh = MeshHierarchy(m, nlevels)
    mesh = mh[(- 1)]
    x = SpatialCoordinate(mesh)

    def get_p1_space():
        # Coarse space for GTMG: continuous P1 on the fine mesh.
        return FunctionSpace(mesh, 'CG', 1)

    def get_p1_prb_bcs():
        # Homogeneous Dirichlet conditions for the coarse problem.
        return DirichletBC(get_p1_space(), Constant(0.0), 'on_boundary')

    def p1_callback():
        # Coarse operator: standard P1 Laplacian bilinear form.
        P1 = get_p1_space()
        p = TrialFunction(P1)
        q = TestFunction(P1)
        return (inner(grad(p), grad(q)) * dx)
    degree = 1
    RT = FunctionSpace(mesh, 'RT', degree)
    DG = FunctionSpace(mesh, 'DG', (degree - 1))
    W = (RT * DG)
    (sigma, u) = TrialFunctions(W)
    (tau, v) = TestFunctions(W)
    f = Function(DG)
    # Right-hand side chosen so u(x, y) = x(1-x) y(1-y) is the exact solution.
    f.interpolate(((((- 2) * (x[0] - 1)) * x[0]) - ((2 * (x[1] - 1)) * x[1])))
    a = (((inner(sigma, tau) - inner(u, div(tau))) + inner(div(sigma), v)) * dx)
    L = (inner(f, v) * dx)
    w = Function(W)
    params = {'mat_type': 'matfree', 'ksp_type': 'preonly', 'pc_type': 'python', 'pc_python_type': 'firedrake.HybridizationPC', 'hybridization': {'ksp_type': 'cg', 'mat_type': 'matfree', 'pc_type': 'python', 'pc_python_type': 'firedrake.GTMGPC', 'gt': {'mg_levels': {'ksp_type': 'chebyshev', 'pc_type': 'jacobi', 'ksp_max_it': 3}, 'mg_coarse': {'ksp_type': 'preonly', 'pc_type': 'mg', 'pc_mg_type': 'full', 'mg_levels': {'ksp_type': 'chebyshev', 'pc_type': 'jacobi', 'ksp_max_it': 3}}}}}
    appctx = {'get_coarse_operator': p1_callback, 'get_coarse_space': get_p1_space, 'coarse_space_bcs': get_p1_prb_bcs()}
    solve((a == L), w, solver_parameters=params, appctx=appctx)
    (_, uh) = w.subfunctions
    # Reuse f to hold the exact solution for the error computation.
    f.interpolate((((x[0] * (1 - x[0])) * x[1]) * (1 - x[1])))
    return errornorm(f, uh, norm_type='L2')
class Solution():
    def minRemoveToMakeValid(self, s: str) -> str:
        """Remove the minimum number of parentheses so that s becomes valid."""
        drop = set()      # indices of parentheses to delete
        open_idx = []     # indices of '(' still awaiting a match
        for idx, ch in enumerate(s):
            if ch == '(':
                open_idx.append(idx)
            elif ch == ')':
                if open_idx:
                    open_idx.pop()
                else:
                    drop.add(idx)  # unmatched ')'
        # Any '(' left on the stack never found a partner.
        drop.update(open_idx)
        return ''.join(ch for idx, ch in enumerate(s) if idx not in drop)
def calculate_expected_base_fee_per_gas(parent_header: BlockHeaderAPI) -> int:
    """EIP-1559 base-fee computation for the block following *parent_header*.

    Returns INITIAL_BASE_FEE for a missing parent or a pre-London parent
    without a base fee; otherwise adjusts the parent's base fee up or down
    proportionally to how far gas usage deviated from the gas target.
    """
    if parent_header is None:
        return INITIAL_BASE_FEE
    try:
        parent_base_fee = parent_header.base_fee_per_gas
    except AttributeError:
        # Parent predates London and carries no base fee.
        return INITIAL_BASE_FEE
    gas_target = parent_header.gas_limit // ELASTICITY_MULTIPLIER
    gas_used = parent_header.gas_used
    if gas_used == gas_target:
        return parent_base_fee
    if gas_used > gas_target:
        # Over target: raise the fee by at least 1 wei.
        delta = gas_used - gas_target
        fee_delta = max((parent_base_fee * delta // gas_target) // BASE_FEE_MAX_CHANGE_DENOMINATOR, 1)
        return parent_base_fee + fee_delta
    # Under target: lower the fee, clamped at zero.
    delta = gas_target - gas_used
    fee_delta = (parent_base_fee * delta // gas_target) // BASE_FEE_MAX_CHANGE_DENOMINATOR
    return max(parent_base_fee - fee_delta, 0)
# Fixed: these two route registrations were plain statements whose returned
# decorators were discarded (the leading '@' was missing), so the endpoint
# was never registered with the API; they are now applied as decorators.
@_api.route('/v1/history/<path:image_name>/fp/<string:product>', methods=['GET'])
@_api.route('/v1/history/<path:image_name>/fp/<string:product>/<string:version>', methods=['GET'])
def is_product_vulnerability_a_false_positive(image_name, product, version=None):
    """Return 204 when the product vulnerability is a known false positive.

    Returns a JSON 404 body when no false-positive record exists for the
    (image, product[, version]) combination.
    """
    is_fp = InternalServer.get_mongodb_driver().is_fp(image_name=image_name, product=product, version=version)
    if not is_fp:
        return (json.dumps({'err': 404, 'msg': 'Product vulnerability not found'}, sort_keys=True), 404)
    return ('', 204)
class MockTelemetryDevice(telemetry.TelemetryDevice):
    """Test double that records whether on_benchmark_start was invoked."""

    def __init__(self, *, internal: bool, serverless_status: serverless.Status):
        super().__init__()
        self.internal = internal
        self.serverless_status = serverless_status
        # Command names look like "default-<status>" for internal devices and
        # "optional-<status>" otherwise.
        prefix = 'default' if internal else 'optional'
        self.command = f"{prefix}-{serverless_status.name.lower()}"
        self.on_benchmark_start_called = False

    def on_benchmark_start(self):
        self.on_benchmark_start_called = True
class NotificationDeny(APIView):
    """Deletes a notification addressed to the requesting user."""
    permission_classes = [IsAuthenticated]

    def get_object(self, pk):
        """Return notification *pk* belonging to the current user, or 404."""
        try:
            matches = Notification.objects.filter(to_user_id=self.request.user.id, pk=pk)
            if len(matches) > 0:
                return matches[0]
            # Not found (or owned by someone else) — normalise to DoesNotExist.
            raise Notification.DoesNotExist
        except Notification.DoesNotExist:
            raise Http404

    def get(self, request, pk):
        """Delete the notification and answer 204 No Content."""
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Visualiser(base._Widget):
    """Audio visualiser widget rendering cava's output in the bar.

    cava writes bar data to a pipe; a helper process (CAVA_DRAW) renders it
    into a shared-memory image which this widget blits on every frame,
    throttled by the global fps_lock.
    """
    _experimental = True
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [('framerate', 25, 'Cava sampling rate.'), ('bars', 8, 'Number of bars'), ('width', DEFAULT_LENGTH, 'Widget width'), ('cava_path', shutil.which('cava'), 'Path to cava. Set if file is not in your PATH.'), ('spacing', 2, 'Space between bars'), ('cava_pipe', '/tmp/cava.pipe', "Pipe for cava's output"), ('bar_height', 20, 'Height of visualiser bars'), ('bar_colour', '#ffffff', 'Colour of visualiser bars'), ('autostart', True, 'Start visualiser automatically'), ('hide', True, 'Hide the visualiser when not active'), ('channels', 'mono', "Visual channels. 'mono' or 'stereo'."), ('invert', False, 'When True, bars will draw from the top down')]
    _screenshots = [('visualiser.gif', 'Default config.')]

    def __init__(self, **config):
        self._config_length = config.pop('width', DEFAULT_LENGTH)
        base._Widget.__init__(self, self._config_length, **config)
        self.add_defaults(Visualiser.defaults)
        self._procs_started = False
        self._shm = None
        self._timer = None
        self._draw_count = 0
        self._toggling = False
        self._starting = False
        self._last_time = time.time()

    def _configure(self, qtile, bar):
        """Write the cava config file and schedule autostart on first configure."""
        if self.cava_path is None:
            raise ConfigError('cava cannot be found.')
        base._Widget._configure(self, qtile, bar)
        if not self.configured:
            config = CONFIG.format(bars=self.bars, framerate=self.framerate, pipe=self.cava_pipe, channels=self.channels)
            with tempfile.NamedTemporaryFile(delete=False) as self.config_file:
                self.config_file.write(config.encode())
                self.config_file.flush()
            self._interval = 1 / self.framerate
            self.y_offset = (self.height - self.bar_height) // 2
            if self.autostart:
                self.timeout_add(1, self._start)
        self._set_length()

    def _set_length(self):
        """Collapse the widget to zero width when hidden and idle."""
        old = self.length
        if self._procs_started or not self.hide:
            new = self._config_length
        else:
            new = 0
        if old != new:
            # Fixed: set the new length *before* redrawing the bar; the
            # previous order redrew with the stale width.
            self.length = new
            self.bar.draw()

    def _start(self):
        """Spawn cava and the drawing helper, then wait for the shm to appear."""
        self._starting = True
        self.cava_proc = self.qtile.spawn([self.cava_path, '-p', self.config_file.name])
        cmd = [PYTHON, CAVA_DRAW.resolve().as_posix(), '--width', f'{self._config_length}', '--height', f'{self.bar_height}', '--bars', f'{self.bars}', '--spacing', f'{self.spacing}', '--pipe', f'{self.cava_pipe}', '--background', self.bar_colour]
        if self.invert:
            cmd.append('--invert')
        self.draw_proc = self.qtile.spawn(cmd)
        self._timer = self.timeout_add(1, self._open_shm)

    def _stop(self):
        """Kill helper processes and release shared-memory resources."""
        if self._timer:
            self._timer.cancel()
        if not self._procs_started:
            return
        if hasattr(self, 'cava_proc'):
            os.kill(self.cava_proc, signal.SIGTERM)
        if hasattr(self, 'draw_proc'):
            os.kill(self.draw_proc, signal.SIGTERM)
        self._procs_started = False
        self._shm.close()
        self._shmfile.close()
        self._lock.close()
        self._lockfile.close()
        # Drop any frame throttle still held so draw() can run next start.
        if fps_lock.locked():
            fps_lock.release()
        self._set_length()

    def _open_shm(self):
        """Map the helper's lock byte and image buffer into memory."""
        from contextlib import contextmanager  # local import for the fix below
        self._lockfile = open(LOCK, 'rb+')
        self._shmfile = open(SHM, 'rb')
        self._lock = mmap.mmap(self._lockfile.fileno(), length=1, access=mmap.ACCESS_WRITE)
        self._shm_size = self.bar_height * self._config_length * 4  # ARGB32
        self._shm = mmap.mmap(self._shmfile.fileno(), length=self._shm_size, access=mmap.ACCESS_READ)

        # Fixed: lock_shm is used via ``with self._take_lock():`` so it must
        # be a context manager, not a bare generator function.
        @contextmanager
        def lock_shm():
            # Spin until the helper releases the lock byte, then hold it.
            while self._lock[0]:
                sleep(0.001)
            self._lock[0] = 1
            yield
            self._lock[0] = 0

        self._take_lock = lock_shm
        self._procs_started = True
        self._starting = False
        self._set_length()

    def lock_state(self):
        """Context manager guarding start/stop against re-entrant toggling.

        Fixed: this was a bare generator function, which cannot be used in a
        ``with`` statement; it now returns a real context manager.
        """
        from contextlib import contextmanager

        @contextmanager
        def _guard():
            self._toggling = True
            yield
            self._toggling = False

        return _guard()

    def draw(self):
        if not self._procs_started:
            # Nothing to show: just paint the background.
            self.drawer.clear(self.background or self.bar.background)
            self.drawer.draw(offsetx=self.offsetx, offsety=self.offsety, width=self.length)
            return
        # Throttle to the cava framerate; the lock is released in loop().
        if not fps_lock.acquire(blocking=False):
            return
        self._draw()

    def _draw(self):
        # Copy the frame out of shared memory while holding the helper lock.
        with self._take_lock():
            surface = cairocffi.ImageSurface.create_for_data(bytearray(self._shm[:self._shm_size]), cairocffi.FORMAT_ARGB32, self._config_length, self.bar_height)
        self.drawer.clear(self.background or self.bar.background)
        self.drawer.ctx.set_source_surface(surface, 0, self.y_offset)
        self.drawer.ctx.paint()
        self.drawer.draw(offsetx=self.offsetx, offsety=self.offsety, width=self.length)
        self._timer = self.timeout_add(self._interval, self.loop)

    def loop(self):
        fps_lock.release()
        self.draw()

    def finalize(self):
        self._stop()
        Path(self.config_file.name).unlink()
        base._Widget.finalize(self)

    # NOTE(review): the previous source showed bare ``_command()`` statements
    # before stop/start/toggle — these look like stripped ``@expose_command()``
    # decorators and are restored below; confirm expose_command is imported
    # at module level.
    @expose_command()
    def stop(self):
        """Stop the visualiser."""
        if self._toggling or not self._procs_started:
            return
        with self.lock_state():
            self._stop()

    @expose_command()
    def start(self):
        """Start the visualiser."""
        if self._procs_started or self._toggling or self._starting:
            return
        with self.lock_state():
            self._start()

    @expose_command()
    def toggle(self):
        """Toggle the visualiser on or off."""
        if self._toggling:
            return
        with self.lock_state():
            if self._procs_started:
                self._stop()
            else:
                self._start()
class OptionPlotoptionsHeatmapStatesSelectMarker(Options):
    """Wrapper for the Highcharts `plotOptions.heatmap.states.select.marker`
    options.

    NOTE(review): every getter/setter pair below shares one name, so the
    original presumably carried @property / @<name>.setter decorators that
    were stripped in this extraction — confirm upstream. Getters pass the
    Highcharts default to _config_get; setters forward the value with
    js_type=False, i.e. serialized as a plain literal rather than JS code.
    """
    def enabled(self):
        return self._config_get(None)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def enabledThreshold(self):
        # Highcharts default: 2.
        return self._config_get(2)
    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)
    def fillColor(self):
        return self._config_get(None)
    def fillColor(self, text: str):
        self._config(text, js_type=False)
    def height(self):
        return self._config_get(None)
    def height(self, num: float):
        self._config(num, js_type=False)
    def lineColor(self):
        # Highcharts default: white.
        return self._config_get('#ffffff')
    def lineColor(self, text: str):
        self._config(text, js_type=False)
    def lineWidth(self):
        return self._config_get(0)
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    def radius(self):
        return self._config_get(4)
    def radius(self, num: float):
        self._config(num, js_type=False)
    def width(self):
        return self._config_get(None)
    def width(self, num: float):
        self._config(num, js_type=False)
class UserEmailList(ResourceList):
    """flask-rest-jsonapi list endpoint for a user's email-address records."""
    def query(self, view_kwargs):
        """Base query, narrowed to one user when 'user_id' is in the URL.

        safe_query_kwargs 404s on an unknown user id.
        """
        query_ = self.session.query(UserEmail)
        if view_kwargs.get('user_id'):
            user = safe_query_kwargs(User, view_kwargs, 'user_id')
            query_ = query_.join(User).filter((User.id == user.id))
        return query_
    view_kwargs = True
    # Only the user themself may list their own email addresses.
    decorators = (api.has_permission('is_user_itself', fetch='user_id', model=UserEmail, methods='GET'),)
    methods = ['GET']
    schema = UserEmailSchema
    # 'query' here is the function defined above, wired into the data layer.
    data_layer = {'session': db.session, 'model': UserEmail, 'methods': {'query': query}}
def figure(figure=None, bgcolor=None, fgcolor=None, engine=None, size=(400, 350)):
    """Return (and make current) a Mayavi figure.

    Parameters
    ----------
    figure : None | Scene | int | str, optional
        None creates a fresh numbered scene; an existing Scene is made
        current; an integer or string selects (or creates) a scene by name.
    bgcolor, fgcolor : optional
        Background / foreground colors; fall back to the global options.
    engine : optional
        Engine to use; resolved from the scene or the current engine if None.
    size : tuple, optional
        Window size used only when a new scene is created.

    Returns
    -------
    The current figure (a Scene).
    """
    if isinstance(figure, Scene):
        # Existing figure: locate its engine and make the figure current.
        if (figure.scene is None):
            engine = registry.find_scene_engine(figure)
        else:
            engine = registry.find_scene_engine(figure.scene)
        set_engine(engine)
        engine.current_scene = figure
    else:
        if (engine is None):
            engine = get_engine()
        if (figure is None):
            # Allocate the next unused scene number.
            name = (max(__scene_number_list) + 1)
            __scene_number_list.update((name,))
            name = ('Mayavi Scene %d' % name)
            engine.new_scene(name=name, size=size)
            engine.current_scene.name = name
        else:
            # BUG FIX: this used `type(figure) in (int, np.int0, np.int8, ...)`.
            # np.int0 was deprecated in NumPy 1.24 and removed in NumPy 2.0,
            # so merely evaluating the tuple raised AttributeError.
            # isinstance(..., np.integer) accepts the same numpy integer
            # scalars; bool is excluded to preserve the original exact-type
            # behavior (type(True) never matched the tuple).
            if (isinstance(figure, (int, np.integer)) and (not isinstance(figure, bool))):
                name = int(figure)
                __scene_number_list.update((name,))
                name = ('Mayavi Scene %d' % name)
            else:
                name = str(figure)
            # Reuse an existing scene with this name, if any.
            for scene in engine.scenes:
                if (scene.name == name):
                    engine.current_scene = scene
                    return scene
            else:
                engine.new_scene(name=name, size=size)
                engine.current_scene.name = name
        figure = engine.current_scene
        scene = figure.scene
        if (scene is not None):
            if hasattr(scene, 'isometric_view'):
                scene.isometric_view()
            else:
                # Scenes without the helper: set the view angles directly.
                view(40, 50)
    scene = figure.scene
    if (scene is not None):
        if (bgcolor is None):
            bgcolor = options.background_color
        scene.background = bgcolor
        if (fgcolor is None):
            fgcolor = options.foreground_color
        scene.foreground = fgcolor
    return figure
class Node():
    """Plain-data description of a graph node: type, name, port count and
    attached properties."""

    def __init__(self, node_type, name, number_of_ports, properties: NodeProperty):
        # Straight attribute storage; toJSON serializes via __dict__.
        self.node_type = node_type
        self.name = name
        self.number_of_ports = number_of_ports
        self.properties = properties

    def toJSON(self):
        """Serialize this node (recursing into attribute objects) as sorted,
        4-space-indented JSON."""
        as_dict = lambda obj: obj.__dict__
        return json.dumps(self, default=as_dict, sort_keys=True, indent=4)
.asyncio
class TestGetCtlDatasetFilter():
    """API tests for the /filter datasets endpoint.

    NOTE(review): pytest decorators were stripped in this extraction —
    `url` is presumably a @pytest.fixture, and the other arguments
    (api_client, generate_auth_header, *_dataset fixtures) come from
    conftest — confirm upstream.
    """
    def url(self) -> str:
        """Endpoint under test."""
        return ((V1_URL_PREFIX + '/filter') + DATASETS)
    def test_get_dataset_not_authenticated(self, url, api_client) -> None:
        """No auth header -> 401."""
        response = api_client.get(url, headers={})
        assert (response.status_code == 401)
    def test_get_dataset_wrong_scope(self, url, api_client: TestClient, generate_auth_header) -> None:
        """Wrong scope (CTL_DATASET_READ instead of DATASET_READ) -> 403."""
        auth_header = generate_auth_header(scopes=[CTL_DATASET_READ])
        response = api_client.get(url, headers=auth_header)
        assert (response.status_code == 403)
    def test_get_only_unlinked_datasets(self, generate_auth_header, api_client, url, unlinked_dataset, linked_dataset) -> None:
        """only_unlinked_datasets=True returns just the unlinked dataset;
        without it both fixtures are returned."""
        auth_header = generate_auth_header(scopes=[DATASET_READ])
        unlinked_url = f'{url}?only_unlinked_datasets=True'
        response = api_client.get(unlinked_url, headers=auth_header)
        # Leftover debug output; harmless under pytest.
        print(unlinked_url)
        assert (response.status_code == 200)
        print([dataset['fides_key'] for dataset in response.json()])
        assert (len(response.json()) == 1)
        assert (response.json()[0]['fides_key'] == unlinked_dataset.fides_key)
        response = api_client.get(url, headers=auth_header)
        assert (response.status_code == 200)
        assert (len(response.json()) == 2)
    def test_saas_dataset_filter(self, generate_auth_header, api_client, url, secondary_sendgrid_instance, linked_dataset, unlinked_dataset) -> None:
        """SaaS-backed datasets are hidden by default and only appear with
        remove_saas_datasets=False."""
        auth_header = generate_auth_header(scopes=[DATASET_READ])
        saas_fides_key = secondary_sendgrid_instance[1].fides_key
        response = api_client.get(url, headers=auth_header)
        assert (response.status_code == 200)
        assert (len(response.json()) == 2)
        assert (saas_fides_key not in [d['fides_key'] for d in response.json()])
        response = api_client.get(f'{url}?remove_saas_datasets=True', headers=auth_header)
        assert (response.status_code == 200)
        assert (len(response.json()) == 2)
        assert (saas_fides_key not in [d['fides_key'] for d in response.json()])
        response = api_client.get(f'{url}?remove_saas_datasets=False', headers=auth_header)
        assert (response.status_code == 200)
        assert (len(response.json()) == 3)
        assert (saas_fides_key in [d['fides_key'] for d in response.json()])
    def test_unlinked_and_no_saas_datasets(self, generate_auth_header, api_client, url, unlinked_dataset, secondary_sendgrid_instance, linked_dataset) -> None:
        """Combining both filters still yields only the unlinked dataset."""
        auth_header = generate_auth_header(scopes=[DATASET_READ])
        response = api_client.get(f'{url}?only_unlinked_datasets=True', headers=auth_header)
        assert (response.status_code == 200)
        assert (len(response.json()) == 1)
        assert (response.json()[0]['fides_key'] == unlinked_dataset.fides_key)
        response = api_client.get(f'{url}?only_unlinked_datasets=True&remove_saas_datasets=False', headers=auth_header)
        assert (response.status_code == 200)
        assert (len(response.json()) == 1)
        assert (response.json()[0]['fides_key'] == unlinked_dataset.fides_key)
class FormatSuffixTests(TestCase):
    """Tests for `format_suffix_patterns`: the rewritten urlconf must resolve
    both plain paths and `.format`-suffixed variants with the right kwargs."""
    def _resolve_urlpatterns(self, urlpatterns, test_paths, allowed=None):
        """Apply format_suffix_patterns, then resolve every test path.

        Each entry in test_paths is either a URLTestPath or a
        (URLTestPath, expected_resolved) pair; expected_resolved=False means
        resolution is expected to 404.
        """
        factory = APIRequestFactory()
        try:
            urlpatterns = format_suffix_patterns(urlpatterns, allowed=allowed)
        except Exception:
            self.fail('Failed to apply `format_suffix_patterns` on the supplied urlpatterns')
        resolver = URLResolver(RegexPattern('^/'), urlpatterns)
        for test_path in test_paths:
            try:
                (test_path, expected_resolved) = test_path
            except (TypeError, ValueError):
                # Bare URLTestPath: resolution is expected to succeed.
                expected_resolved = True
            request = factory.get(test_path.path)
            try:
                (callback, callback_args, callback_kwargs) = resolver.resolve(request.path_info)
            except Resolver404:
                (callback, callback_args, callback_kwargs) = (None, None, None)
                # A 404 is only a failure when resolution was expected.
                if expected_resolved:
                    raise
            except Exception:
                self.fail(('Failed to resolve URL: %s' % request.path_info))
            if (not expected_resolved):
                assert (callback is None)
                continue
            assert (callback_args == test_path.args)
            assert (callback_kwargs == test_path.kwargs)
    def _test_trailing_slash(self, urlpatterns):
        """The suffix goes before a trailing slash ('/test.api/'), never after."""
        test_paths = [(URLTestPath('/test.api', (), {'format': 'api'}), True), (URLTestPath('/test/.api', (), {'format': 'api'}), False), (URLTestPath('/test.api/', (), {'format': 'api'}), True)]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_trailing_slash(self):
        urlpatterns = [path('test/', dummy_view)]
        self._test_trailing_slash(urlpatterns)
    def test_trailing_slash_django2(self):
        urlpatterns = [path('test/', dummy_view)]
        self._test_trailing_slash(urlpatterns)
    def _test_format_suffix(self, urlpatterns):
        """Any suffix resolves and is captured in the 'format' kwarg."""
        test_paths = [URLTestPath('/test', (), {}), URLTestPath('/test.api', (), {'format': 'api'}), URLTestPath('/test.asdf', (), {'format': 'asdf'})]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_format_suffix(self):
        urlpatterns = [path('test', dummy_view)]
        self._test_format_suffix(urlpatterns)
    def test_format_suffix_django2(self):
        urlpatterns = [path('test', dummy_view)]
        self._test_format_suffix(urlpatterns)
    def test_format_suffix_django2_args(self):
        """Path converters keep typed kwargs (int pk) while re_path keeps
        string kwargs, with or without a suffix."""
        urlpatterns = [path('convtest/<int:pk>', dummy_view), re_path('^retest/(?P<pk>[0-9]+)$', dummy_view)]
        test_paths = [URLTestPath('/convtest/42', (), {'pk': 42}), URLTestPath('/convtest/42.api', (), {'pk': 42, 'format': 'api'}), URLTestPath('/convtest/42.asdf', (), {'pk': 42, 'format': 'asdf'}), URLTestPath('/retest/42', (), {'pk': '42'}), URLTestPath('/retest/42.api', (), {'pk': '42', 'format': 'api'}), URLTestPath('/retest/42.asdf', (), {'pk': '42', 'format': 'asdf'})]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def _test_default_args(self, urlpatterns):
        """Extra default kwargs passed to the view survive the rewrite."""
        test_paths = [URLTestPath('/test', (), {'foo': 'bar'}), URLTestPath('/test.api', (), {'foo': 'bar', 'format': 'api'}), URLTestPath('/test.asdf', (), {'foo': 'bar', 'format': 'asdf'})]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_default_args(self):
        urlpatterns = [path('test', dummy_view, {'foo': 'bar'})]
        self._test_default_args(urlpatterns)
    def test_default_args_django2(self):
        urlpatterns = [path('test', dummy_view, {'foo': 'bar'})]
        self._test_default_args(urlpatterns)
    def _test_included_urls(self, urlpatterns):
        """Suffixes also work on patterns pulled in via include()."""
        test_paths = [URLTestPath('/test/path', (), {'foo': 'bar'}), URLTestPath('/test/path.api', (), {'foo': 'bar', 'format': 'api'}), URLTestPath('/test/path.asdf', (), {'foo': 'bar', 'format': 'asdf'})]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_included_urls(self):
        nested_patterns = [path('path', dummy_view)]
        urlpatterns = [path('test/', include(nested_patterns), {'foo': 'bar'})]
        self._test_included_urls(urlpatterns)
    def test_included_urls_mixed(self):
        """Mixed path()/re_path() nesting: converter-captured kwargs stay
        typed, regex-captured ones stay strings, in every combination."""
        nested_patterns = [path('path/<int:child>', dummy_view), re_path('^re_path/(?P<child>[0-9]+)$', dummy_view)]
        urlpatterns = [re_path('^pre_path/(?P<parent>[0-9]+)/', include(nested_patterns), {'foo': 'bar'}), path('ppath/<int:parent>/', include(nested_patterns), {'foo': 'bar'})]
        test_paths = [URLTestPath('/pre_path/87/path/42', (), {'parent': '87', 'child': 42, 'foo': 'bar'}), URLTestPath('/pre_path/87/path/42.api', (), {'parent': '87', 'child': 42, 'foo': 'bar', 'format': 'api'}), URLTestPath('/pre_path/87/path/42.asdf', (), {'parent': '87', 'child': 42, 'foo': 'bar', 'format': 'asdf'}), URLTestPath('/ppath/87/re_path/42', (), {'parent': 87, 'child': '42', 'foo': 'bar'}), URLTestPath('/ppath/87/re_path/42.api', (), {'parent': 87, 'child': '42', 'foo': 'bar', 'format': 'api'}), URLTestPath('/ppath/87/re_path/42.asdf', (), {'parent': 87, 'child': '42', 'foo': 'bar', 'format': 'asdf'}), URLTestPath('/ppath/87/path/42', (), {'parent': 87, 'child': 42, 'foo': 'bar'}), URLTestPath('/ppath/87/path/42.api', (), {'parent': 87, 'child': 42, 'foo': 'bar', 'format': 'api'}), URLTestPath('/ppath/87/path/42.asdf', (), {'parent': 87, 'child': 42, 'foo': 'bar', 'format': 'asdf'}), URLTestPath('/pre_path/87/re_path/42', (), {'parent': '87', 'child': '42', 'foo': 'bar'}), URLTestPath('/pre_path/87/re_path/42.api', (), {'parent': '87', 'child': '42', 'foo': 'bar', 'format': 'api'}), URLTestPath('/pre_path/87/re_path/42.asdf', (), {'parent': '87', 'child': '42', 'foo': 'bar', 'format': 'asdf'})]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def _test_allowed_formats(self, urlpatterns):
        """With an allow-list, only listed suffixes resolve."""
        allowed_formats = ['good', 'ugly']
        test_paths = [(URLTestPath('/test.good/', (), {'format': 'good'}), True), (URLTestPath('/test.bad', (), {}), False), (URLTestPath('/test.ugly', (), {'format': 'ugly'}), True)]
        self._resolve_urlpatterns(urlpatterns, test_paths, allowed=allowed_formats)
    def test_allowed_formats_re_path(self):
        urlpatterns = [re_path('^test$', dummy_view)]
        self._test_allowed_formats(urlpatterns)
    def test_allowed_formats_path(self):
        urlpatterns = [path('test', dummy_view)]
        self._test_allowed_formats(urlpatterns)
def test_skip_transitive_and_missing(caplog: pytest.LogCaptureFixture) -> None:
    """Deprecated skip-* flags map to their ignore codes and log deprecation
    warnings (skip_missing -> DEP001, skip_transitive -> DEP003)."""
    result = get_value_for_ignore_argument(ignore=(), skip_obsolete=False, skip_unused=False, skip_missing=True, skip_transitive=True, skip_misplaced_dev=False)
    assert (result == ('DEP001', 'DEP003'))
    assert (generate_deprecation_warning(flag_name='skip-missing', issue_code='DEP001') in caplog.text)
    assert (generate_deprecation_warning(flag_name='skip-transitive', issue_code='DEP003') in caplog.text)
def test_to_gds(tmp_path):
    """A 2x2x2 box exported at the x=0 cross-section yields one GDS cell with
    a single polygon of area 4 (the 2x2 cut)."""
    geometry = td.Box(size=(2, 2, 2))
    medium = td.Medium()
    structure = td.Structure(geometry=geometry, medium=medium)
    fname = str((tmp_path / 'structure.gds'))
    structure.to_gds_file(fname, x=0, gds_cell_name='X')
    cell = gdstk.read_gds(fname).cells[0]
    assert (cell.name == 'X')
    assert (len(cell.polygons) == 1)
    assert np.allclose(cell.polygons[0].area(), 4.0)
class TestXYZGridTransition(BaseEvenniaTest):
    """Tests for cross-map transitions between two XYZ grid maps."""
    def setUp(self):
        """Create a grid holding both transition test maps."""
        super().setUp()
        (self.grid, err) = xyzgrid.XYZGrid.create('testgrid')
        self.map_data12a = {'map': MAP12a, 'zcoord': 'map12a', 'legend': {'T': Map12aTransition}}
        self.map_data12b = {'map': MAP12b, 'zcoord': 'map12b', 'legend': {'T': Map12bTransition}}
        self.grid.add_maps(self.map_data12a, self.map_data12b)
    def tearDown(self):
        self.grid.delete()
    # NOTE(review): the bare list below is presumably the argument of a
    # stripped @parameterized.expand decorator for test_shortest_path —
    # confirm upstream.
    ([((1, 0), (1, 1), ('w', 'n', 'e')), ((1, 1), (1, 0), ('w', 's', 'e'))])
    def test_shortest_path(self, startcoord, endcoord, expected_directions):
        """Pathfinding on map12a returns the expected direction sequence."""
        (directions, _) = self.grid.get_map('map12a').get_shortest_path(startcoord, endcoord)
        self.assertEqual(expected_directions, tuple(directions))
    def test_spawn(self):
        """Spawning builds 6 rooms / 10 exits, and the transition exits link
        rooms across the two maps in both directions."""
        self.grid.spawn()
        self.assertEqual(xyzroom.XYZRoom.objects.all().count(), 6)
        self.assertEqual(xyzroom.XYZExit.objects.all().count(), 10)
        room1 = xyzroom.XYZRoom.objects.get_xyz(xyz=(0, 1, 'map12a'))
        room2 = xyzroom.XYZRoom.objects.get_xyz(xyz=(1, 0, 'map12b'))
        east_exit = [exi for exi in room1.exits if (exi.db_key == 'east')][0]
        west_exit = [exi for exi in room2.exits if (exi.db_key == 'west')][0]
        self.assertEqual(east_exit.db_destination, room2)
        self.assertEqual(west_exit.db_destination, room1)
def test_pynb_set_kernel(tmpdir):
    """Exporting this very file through pynb with --kernel python3 produces a
    notebook whose metadata records the python3 kernel."""
    cmd = 'pynb {} --disable-cache --kernel python3 --export-ipynb {}/test.ipynb'
    local(cmd.format(os.path.realpath(__file__), tmpdir))
    # Round-trip through nbconvert; its stdout includes the kernel name.
    cmd = 'jupyter nbconvert --stdout --to notebook {}/test.ipynb'
    output = local(cmd.format(tmpdir))
    assert (b'python3' in output)
class ActionModel(nn.Module):
    """Recurrent policy head: maps an observation frame plus the previous
    hidden state to a new hidden state and a softmax action distribution."""

    def __init__(self, n_observations, n_actions, n_hidden):
        super().__init__()
        # Encoder, recurrent core, and two-layer action head.
        self.linear = nn.Linear(n_observations, n_hidden)
        self.gru = nn.RNNCell(n_hidden, n_hidden)
        self.linear2 = nn.Linear(n_hidden, n_hidden)
        self.linear3 = nn.Linear(n_hidden, n_actions)
        self.n_hidden = n_hidden
        self.n_actions = n_actions
        self.actf = torch.tanh

    def initial_state(self, B):
        """All-ones starting hidden state for a batch of size B."""
        return torch.ones(B, self.n_hidden)

    def forward(self, state, frame):
        """Run one recurrent step.

        Returns (new_hidden_state, action_probabilities), where the
        probabilities are a softmax over the last dimension.
        """
        embedded = self.actf(self.linear(frame))
        hidden = self.gru(embedded, state)
        logits = self.linear3(self.actf(self.linear2(hidden)))
        return (hidden, torch.softmax(logits, dim=-1))
class OptionPlotoptionsAreasplineSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
    """Wrapper for the Highcharts areaspline sonification
    `defaultInstrumentOptions.mapping.noteDuration` options.

    NOTE(review): paired getter/setter defs share names, so @property /
    @<name>.setter decorators were presumably stripped here — confirm
    upstream. Setters forward with js_type=False (plain literal, not JS).
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class UsableFlag(object):
    """Mixin tracking whether an object is still usable.

    Unusability is sticky: once this flag (or any collaborator consulted via
    _adapt_usablility) goes unusable, it never flips back.
    """

    def __init__(self):
        self.__is_usable = True

    def _adapt_usablility(self, ouflag):
        # Absorb a collaborator's state: stay usable only while both are.
        self.__is_usable = (self.__is_usable and ouflag.is_usable())

    def is_usable(self):
        """True while the object has never been marked unusable."""
        return self.__is_usable

    def _set_not_usable(self):
        tracer.info('Setting object to unusable.')
        self.__is_usable = False
def _read_header(header_bytes):
header = {}
(ES, SF, NE, NV, KX) = array.array('i', header_bytes[0:(5 * 4)])
header.update({'n_bytes_per_element': ES, 'sign_flag': SF, 'shape_e': NE, 'shape_v': NV, 'ordering': KX})
(DE, DV, X0, Y0, ROT) = array.array('d', header_bytes[20:(20 + (5 * 8))])
header.update({'spacing_e': DE, 'spacing_v': DV, 'x_origin': X0, 'y_origin': Y0, 'rotation': ROT})
(ZBASE, ZMULT) = array.array('d', header_bytes[60:(60 + (2 * 8))])
header.update({'base_value': ZBASE, 'data_factor': ZMULT})
(PROJ, UNITX, UNITY, UNITZ, NVPTS) = array.array('i', header_bytes[140:(140 + (5 * 4))])
(IZMIN, IZMAX, IZMED, IZMEA) = array.array('f', header_bytes[160:(160 + (4 * 4))])
(ZVAR,) = array.array('d', header_bytes[176:(176 + 8)])
(PRCS,) = array.array('i', header_bytes[184:(184 + 4)])
header.update({'map_projection': PROJ, 'units_x': UNITX, 'units_y': UNITY, 'units_z': UNITZ, 'n_valid_points': NVPTS, 'grid_min': IZMIN, 'grid_max': IZMAX, 'grid_median': IZMED, 'grid_mean': IZMEA, 'grid_variance': ZVAR, 'process_flag': PRCS})
return header |
def restart_file():
    """Re-exec the current Python process with the same argv.

    Best-effort cleanup first: close inherited file descriptors and sockets
    so the replacement process does not leak them.
    """
    try:
        p = psutil.Process(os.getpid())
        # NOTE(review): psutil.Process.connections() is deprecated in favor
        # of net_connections() in recent psutil — confirm the pinned version.
        for handler in (p.open_files() + p.connections()):
            os.close(handler.fd)
    except OSError:
        # e.g. an fd already closed or invalid (-1); ignore and continue.
        pass
    except Exception as e:
        logger.error(e)
    python = sys.executable
    if (os.name == 'nt'):
        # Windows path: exec with argv[0] replaced by 'python'.
        os.execv(sys.executable, (['python'] + sys.argv))
    else:
        os.execl(python, python, *sys.argv)
def get_tower_item_obtained() -> list[list[int]]:
    """Parse per-star, per-stage item-obtained flags from the save stream.

    Reads two 4-byte counts (stars, then stages) followed by
    stars * stages one-byte flags in row-major order.
    """
    total_stars = next_int(4)
    total_stages = next_int(4)
    return [
        [next_int(1) for _ in range(total_stages)]
        for _ in range(total_stars)
    ]
_view(['GET'])  # NOTE(review): looks like a stripped @api_view(['GET']) decorator — confirm upstream
def org_details(request, format=None):
    """REST endpoint returning practice statistics for the requested orgs.

    Query params: org_type (defaults to 'all_practices'), keys and org
    (both comma-separated lists parsed by utils.param_to_list).
    """
    org_type = request.GET.get('org_type', None)
    keys = utils.param_to_list(request.query_params.get('keys', []))
    org_codes = utils.param_to_list(request.query_params.get('org', []))
    if (org_type is None):
        org_type = 'all_practices'
    orgs = _get_orgs(org_type, org_codes)
    data = _get_practice_stats_entries(keys, org_type, orgs)
    return Response(list(data))
class Variable(ArcAnnotation):
    """Petri-net arc annotation holding a single variable name."""
    # Variables on input arcs are allowed to bind values.
    input_allowed = True
    # Valid names: a letter followed by word characters.
    syntax = re.compile('^[a-zA-Z]\\w*$')
    def __init__(self, name):
        """Create a variable; raises ValueError for a malformed name."""
        if (not self.__class__.syntax.match(name)):
            raise ValueError(("not a variable name '%s'" % name))
        self.name = name
    def copy(self):
        """Fresh Variable with the same name."""
        return self.__class__(self.name)
    __pnmltag__ = 'variable'
    def __pnmldump__(self):
        """Serialize to a PNML tree node."""
        return Tree(self.__pnmltag__, self.name)
    def __pnmlload__(cls, tree):
        # NOTE(review): takes `cls` — presumably a stripped @classmethod;
        # confirm upstream.
        return cls(tree.data)
    def rename(self, name):
        # Re-runs __init__ so the new name is validated too.
        self.__init__(name)
    def __str__(self):
        return self.name
    def __repr__(self):
        return ('Variable(%s)' % repr(self.name))
    def modes(self, values):
        """One Substitution per candidate value; raises ModeError when the
        value set is empty."""
        result = [Substitution({self.name: v}) for v in values]
        if (len(result) == 0):
            raise ModeError('no value to bind')
        return result
    def bind(self, binding):
        """Token carrying the value this variable takes under `binding`."""
        return Token(binding[self.name])
    def substitute(self, binding):
        """Rename in place to the image of this name under `binding`."""
        self.rename(binding(self.name))
    def vars(self):
        return [self.name]
    def __eq__(self, other):
        # Equal iff the other object has a matching .name.
        try:
            return (self.name == other.name)
        except AttributeError:
            return False
    def __hash__(self):
        return hash(self.name)
def match_to_str(ofmatch):
    """Convert an OFPMatch into a plain {field: printable-value} dict.

    vlan_vid, in_port and packet_type get dedicated formatting; any other
    masked field is rendered as "value/mask"; unmasked fields pass through.
    """
    result = {}
    for oxm_field in ofmatch.to_jsondict()['OFPMatch']['oxm_fields']:
        tlv = oxm_field['OXMTlv']
        key, mask, value = tlv['field'], tlv['mask'], tlv['value']
        if key == 'vlan_vid':
            value = match_vid_to_str(value, mask)
        elif key == 'in_port':
            value = UTIL.ofp_port_to_user(value)
        elif key == 'packet_type':
            # Split the 32-bit packet_type into [namespace, ns_type].
            value = [value >> 16, value & 65535]
        elif mask is not None:
            value = '%s/%s' % (value, mask)
        # setdefault keeps the first occurrence of a duplicated field.
        result.setdefault(key, value)
    return result
_event
class LiveLocationEvent(ThreadEvent):
    """Thread event for live-location pulls."""
    def _parse(cls, session, data):
        # NOTE(review): takes `cls` — presumably a stripped @classmethod;
        # confirm upstream.
        from . import _location
        thread = cls._get_thread(session, data)
        for location_data in data['messageLiveLocations']:
            # NOTE(review): these objects are built but never used or
            # returned — this parse looks unfinished (or the event is
            # intentionally swallowed). Confirm intent upstream.
            message = _models.Message(thread=thread, id=data['messageId'])
            author = _threads.User(session=session, id=str(location_data['senderId']))
            location = _location.LiveLocationAttachment._from_pull(location_data)
        return None
def test_mysql_loader_load_data_successful(mysql_loader, mocker):
    """load_data returns per-row dicts plus a doc_id derived from the query
    and the row contents, and executes exactly the given query."""
    mock_cursor = MagicMock()
    mocker.patch.object(mysql_loader, 'cursor', mock_cursor)
    mock_cursor.fetchall.return_value = [(1, 'data1'), (2, 'data2')]
    query = 'SELECT * FROM table'
    result = mysql_loader.load_data(query)
    assert ('doc_id' in result)
    assert ('data' in result)
    assert (len(result['data']) == 2)
    # Each row records the originating query as its url metadata.
    assert (result['data'][0]['meta_data']['url'] == query)
    assert (result['data'][1]['meta_data']['url'] == query)
    # doc_id is the sha256 of the query concatenated with the joined contents.
    doc_id = hashlib.sha256((query + ', '.join([d['content'] for d in result['data']])).encode()).hexdigest()
    assert (result['doc_id'] == doc_id)
    # BUG FIX: the original `assert mock_cursor.execute.called_with(query)`
    # always passed — `called_with` is not a Mock assertion method, so the
    # call just created a truthy child mock. assert_called_with actually
    # verifies the query that was executed.
    mock_cursor.execute.assert_called_with(query)
def test_discover_v4_message_pack():
    """A v4 discovery PING packed with a private key unpacks to the matching
    public key and command id."""
    (sender, recipient) = AddressFactory.create_batch(2)
    version = rlp.sedes.big_endian_int.serialize(PROTO_VERSION)
    payload = (version, sender.to_endpoint(), recipient.to_endpoint())
    privkey = PrivateKeyFactory()
    message = _pack_v4(CMD_PING.id, payload, privkey)
    (pubkey, cmd_id, payload, _) = _unpack_v4(message)
    assert (pubkey == privkey.public_key)
    assert (cmd_id == CMD_PING.id)
def get_datetime(index_timestamp, timestring):
    """Parse an index timestamp whose format may be week- or month-granular.

    Week-granular formats (%W/%U/%V) get an explicit weekday appended so
    strptime resolves to the first day of the week; month-granular formats
    without a day directive default to the first of the month. ISO week
    numbers (%G together with %V) are parsed with the non-ISO directives and
    corrected afterwards via handle_iso_week_number.
    """
    uses_iso_week = False
    if any(marker in timestring for marker in ('%W', '%U', '%V')):
        # Pin the weekday (1 = Monday under %w) so the week parses to a date.
        timestring += '%w'
        index_timestamp += '1'
        if ('%V' in timestring) and ('%G' in timestring):
            # Swap the ISO directives for their non-ISO counterparts and
            # remember to fix the result up after parsing.
            uses_iso_week = True
            timestring = timestring.replace('%G', '%Y').replace('%V', '%W')
    elif '%m' in timestring:
        if '%d' not in timestring:
            # No day directive: default to the first of the month.
            timestring += '%d'
            index_timestamp += '1'
    mydate = datetime.strptime(index_timestamp, timestring)
    if uses_iso_week:
        mydate = handle_iso_week_number(mydate, timestring, index_timestamp)
    return mydate
def load_package(package) -> bool:
    """Import `package` by dotted name, reporting success instead of raising.

    Returns True on success, False when the module cannot be found; other
    import-time errors still propagate.
    """
    logger.debug(f'loading package: {package}')
    try:
        importlib.import_module(package)
        return True
    except ModuleNotFoundError as error:
        logger.warning(f'failed to import package: {package}. ModuleNotFoundError: {error}')
        return False
class QueryServicer(object):
    """gRPC service skeleton (generated-style): every RPC reports
    UNIMPLEMENTED until a subclass overrides it."""
    def Allowance(self, request, context):
        """Unimplemented RPC stub."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def Allowances(self, request, context):
        """Unimplemented RPC stub."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def extractCclawtranslationsCom(item):
    """Map a scraped release item to a release message.

    Returns None for previews or items without a chapter/volume number, a
    built release message when a known tag matches, and False otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    has_number = chp or vol
    if (not has_number) or ('preview' in item['title'].lower()):
        return None
    release_map = (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'))
    for tag, group_name, tl_type in release_map:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, group_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def _process_modules(mods) -> List[DAG]:
    """Collect every module-level DAG object defined in the given modules."""
    candidates = (
        (obj, module)
        for module in mods
        for obj in module.__dict__.values()
        if isinstance(obj, DAG)
    )
    found_dags = []
    for (dag, mod) in candidates:
        try:
            logger.info(f'Found dag {dag} from mod {mod} and model file {mod.__file__}')
            found_dags.append(dag)
        except Exception:
            # Keep scanning remaining entries even if one blows up.
            msg = traceback.format_exc()
            logger.error(f'Failed to dag file, error message: {msg}')
    return found_dags
def test_follow_redirects(server):
    """--follow-redirects prints both the 301 hop and the final 200 response."""
    url = str(server.url.copy_with(path='/redirect_301'))
    runner = CliRunner()
    # NOTE(review): invoke() is missing its first (cli/command) argument —
    # presumably stripped in this extraction; confirm upstream.
    result = runner.invoke( [url, '--follow-redirects'])
    assert (result.exit_code == 0)
    # Date headers vary per run, so they are stripped before comparing.
    assert (remove_date_header(splitlines(result.output)) == ['HTTP/1.1 301 Moved Permanently', 'server: uvicorn', 'location: /', 'Transfer-Encoding: chunked', '', 'HTTP/1.1 200 OK', 'server: uvicorn', 'content-type: text/plain', 'Transfer-Encoding: chunked', '', 'Hello, world!'])
class OptionSeriesSankeySonificationDefaultspeechoptionsMappingVolume(Options):
    """Wrapper for the Highcharts sankey sonification
    `defaultSpeechOptions.mapping.volume` options.

    NOTE(review): paired getter/setter defs share names, so @property /
    @<name>.setter decorators were presumably stripped here — confirm
    upstream. Setters forward with js_type=False (plain literal, not JS).
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class AddRequestHeaders(OptionTest):
    """Option test: `add_request_headers` must surface each configured header
    on the request seen by the backend, honoring append/replace flags."""
    parent: Test
    # One mapping per case; 'append' toggles append-vs-replace semantics.
    VALUES: ClassVar[Sequence[Dict[(str, Dict[(str, Union[(str, bool)])])]]] = [{'foo': {'value': 'bar'}}, {'moo': {'value': 'arf'}}, {'zoo': {'append': True, 'value': 'bar'}}, {'xoo': {'append': False, 'value': 'dwe'}}, {'aoo': {'value': 'tyu'}}]
    def config(self) -> Generator[(Union[(str, Tuple[(Node, str)])], None, None)]:
        """Emit the YAML fragment for this option value."""
        (yield ('add_request_headers: %s' % json.dumps(self.value)))
    def check(self):
        """Every configured header must appear (lower-cased) on each
        recorded backend request with the configured value."""
        for r in self.parent.results:
            for (k, v) in self.value.items():
                assert r.backend
                assert r.backend.request
                actual = r.backend.request.headers.get(k.lower())
                if isinstance(v, dict):
                    assert (actual == [v['value']]), (actual, [v['value']])
                else:
                    assert (actual == [v]), (actual, [v])
('factory_type', ['non-string-keys'])
def test_set_factories_with_non_string_keys(factory_aggregate, factory_a, factory_b):
    """FactoryAggregate accepts non-string (class) keys: set_providers with
    type keys, then dispatch by type when calling the aggregate."""
    factory_aggregate.set_providers({ExampleA: factory_a, ExampleB: factory_b})
    object_a = factory_aggregate(ExampleA, 1, 2, init_arg3=3, init_arg4=4)
    object_b = factory_aggregate(ExampleB, 11, 22, init_arg3=33, init_arg4=44)
    # Positional and keyword args are forwarded untouched to each factory.
    assert isinstance(object_a, ExampleA)
    assert (object_a.init_arg1 == 1)
    assert (object_a.init_arg2 == 2)
    assert (object_a.init_arg3 == 3)
    assert (object_a.init_arg4 == 4)
    assert isinstance(object_b, ExampleB)
    assert (object_b.init_arg1 == 11)
    assert (object_b.init_arg2 == 22)
    assert (object_b.init_arg3 == 33)
    assert (object_b.init_arg4 == 44)
    assert (factory_aggregate.providers == {ExampleA: factory_a, ExampleB: factory_b})
.django_db
def test_agency_without_office(client, monkeypatch, sub_agency_data_1, elasticsearch_transaction_index):
    """A sub-agency with no offices returns results with an empty children
    list for the given fiscal year / award-type filter."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
    resp = client.get(url.format(toptier_code='003', filter='?fiscal_year=2021&award_type_codes=[B]'))
    assert (resp.status_code == status.HTTP_200_OK)
    expected_results = [{'name': 'Sub-Agency 3', 'abbreviation': 'A3', 'total_obligations': 110.0, 'transaction_count': 1, 'new_award_count': 1, 'children': []}]
    assert (resp.status_code == status.HTTP_200_OK)
    assert (resp.json()['results'] == expected_results)
_frozen_dataclass_decorator
class LightInfo(EntityInfo):
    """Static description of an ESPHome light entity.

    NOTE(review): the bare strings after fields would be statements, so
    attribute meaning is documented in comments here.
    """
    # Modern (API >= 1.6) color-mode bitmasks reported by the device.
    supported_color_modes: list[int] = converter_field(default_factory=list, converter=list)
    min_mireds: float = converter_field(default=0.0, converter=fix_float_single_double_conversion)
    max_mireds: float = converter_field(default=0.0, converter=fix_float_single_double_conversion)
    effects: list[str] = converter_field(default_factory=list, converter=list)
    # Pre-1.6 capability booleans, kept for backwards compatibility.
    legacy_supports_brightness: bool = False
    legacy_supports_rgb: bool = False
    legacy_supports_white_value: bool = False
    legacy_supports_color_temperature: bool = False
    def supported_color_modes_compat(self, api_version: APIVersion) -> list[int]:
        """Color modes adjusted for the peer's API version.

        For API < 1.6 the four legacy booleans are translated into a single
        combined capability bitmask via the lookup table below (unknown
        combinations yield an empty list); newer peers get the native list.
        """
        if (api_version < APIVersion(1, 6)):
            key = (self.legacy_supports_brightness, self.legacy_supports_rgb, self.legacy_supports_white_value, self.legacy_supports_color_temperature)
            # Key order: (brightness, rgb, white_value, color_temperature).
            modes_map = {(False, False, False, False): [LightColorCapability.ON_OFF], (True, False, False, False): [(LightColorCapability.ON_OFF | LightColorCapability.BRIGHTNESS)], (True, False, False, True): [((LightColorCapability.ON_OFF | LightColorCapability.BRIGHTNESS) | LightColorCapability.COLOR_TEMPERATURE)], (True, True, False, False): [((LightColorCapability.ON_OFF | LightColorCapability.BRIGHTNESS) | LightColorCapability.RGB)], (True, True, True, False): [(((LightColorCapability.ON_OFF | LightColorCapability.BRIGHTNESS) | LightColorCapability.RGB) | LightColorCapability.WHITE)], (True, True, False, True): [(((LightColorCapability.ON_OFF | LightColorCapability.BRIGHTNESS) | LightColorCapability.RGB) | LightColorCapability.COLOR_TEMPERATURE)], (True, True, True, True): [((((LightColorCapability.ON_OFF | LightColorCapability.BRIGHTNESS) | LightColorCapability.RGB) | LightColorCapability.WHITE) | LightColorCapability.COLOR_TEMPERATURE)]}
            return (cast(list[int], modes_map[key]) if (key in modes_map) else [])
        return self.supported_color_modes
class CRG(Module):
    """Clock/reset generator: drives cd_sys directly from the board clock and
    synchronizes the active-low cpu_reset button into that domain."""

    def __init__(self, platform, sys_clk_freq):
        self.rst = Signal()
        self.clock_domains.cd_sys = ClockDomain()
        clk = platform.request('clk100')
        rst_n = platform.request('cpu_reset')
        self.comb += self.cd_sys.clk.eq(clk)
        self.specials += AsyncResetSynchronizer(self.cd_sys, (~ rst_n))
        # BUG FIX: the constraint argument was `(.0 / .0)`, i.e. 0.0/0.0,
        # which raises ZeroDivisionError the moment the CRG is instantiated.
        # The period constraint is the clock period in nanoseconds:
        # 1e9 / frequency-in-Hz.
        platform.add_period_constraint(clk, 1e9 / sys_clk_freq)
class ParagonPeer(BasePeer):
    """Peer speaking only the Paragon sub-protocol (used for p2p testing)."""
    supported_sub_protocols = (ParagonProtocol,)
    sub_proto: ParagonProtocol = None
    _property  # NOTE(review): presumably a stripped @property decorator — confirm upstream
    def paragon_api(self) -> ParagonAPI:
        """The ParagonAPI logic registered on this peer's connection."""
        return self.connection.get_logic(ParagonAPI.name, ParagonAPI)
    def get_behaviors(self) -> Tuple[(BehaviorAPI, ...)]:
        """Base behaviors plus the Paragon API behavior."""
        return (super().get_behaviors() + (ParagonAPI().as_behavior(),))
('Event Role Permission > Event Role Permission Details > Update Event Role Permission')
def event_role_permission_patch(transaction):
    """Dredd hook: seed an EventRolePermission before the PATCH test runs.

    `transaction` is the dredd transaction object (unused here; the hook
    only prepares database state).
    """
    with stash['app'].app_context():
        event_role_permission = EventRolePermissionsFactory()
        db.session.add(event_role_permission)
        db.session.commit()
class OptionSeriesArearangeStatesSelect(Options):
    """Wrapper for the Highcharts `series.arearange.states.select` options.

    NOTE(review): paired getter/setter defs share names, so @property /
    @<name>.setter decorators were presumably stripped here — confirm
    upstream. Sub-option accessors return typed child option objects.
    """
    def animation(self) -> 'OptionSeriesArearangeStatesSelectAnimation':
        return self._config_sub_data('animation', OptionSeriesArearangeStatesSelectAnimation)
    def enabled(self):
        # Highcharts default: select state enabled.
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def halo(self) -> 'OptionSeriesArearangeStatesSelectHalo':
        return self._config_sub_data('halo', OptionSeriesArearangeStatesSelectHalo)
    def lineWidth(self):
        return self._config_get(None)
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    def lineWidthPlus(self):
        return self._config_get(1)
    def lineWidthPlus(self, num: float):
        self._config(num, js_type=False)
    def marker(self) -> 'OptionSeriesArearangeStatesSelectMarker':
        return self._config_sub_data('marker', OptionSeriesArearangeStatesSelectMarker)
_meta(characters.sanae.SanaeFaithReturnCardAction)
class SanaeFaithReturnCardAction():
    """UI metadata for the Sanae Faith return-card action (registered via a
    stripped decorator on the line above — confirm upstream)."""
    def choose_card_text(self, act, cards):
        """(ok, prompt) for the card-selection UI; ok mirrors act.cond."""
        if act.cond(cards):
            return (True, ('<style=Skill.Name></style>:%s' % act.target.ui_meta.name))
        else:
            return (False, ('<style=Skill.Name></style>:%s' % act.target.ui_meta.name))
def log_tacacsplusaccounting2_setting(data, fos):
    """Push the tacacs+accounting2 log settings in `data` to FortiOS.

    Filters the module parameters down to the supported keys, converts
    underscores to hyphens, and issues the set against the given vdom.
    """
    settings = data['log_tacacsplusaccounting2_setting']
    payload = underscore_to_hyphen(filter_log_tacacsplusaccounting2_setting_data(settings))
    return fos.set('log.tacacs+accounting2', 'setting', data=payload, vdom=data['vdom'])
class CompilerManager(object):
    """Builds Compiler objects from the global compiler_config mapping and
    looks them up by (case-insensitive) name."""

    def __init__(self):
        self.compilers = []
        self._generate_compilers_()

    def _generate_compilers_(self):
        """Populate self.compilers, one Compiler per compiler_config entry."""
        for name, settings in compiler_config.items():
            self.compilers.append(
                Compiler(name=name, compiler=settings['compiler'], postfix=settings['postfix'])
            )

    def find_compiler(self, name):
        """Return the compiler whose name matches ignoring case, else None."""
        wanted = name.lower()
        for candidate in self.compilers:
            if candidate.name.lower() == wanted:
                return candidate
        return None
def extractMoomoontlBlogspotCom(item):
    """Map a scraped release item to a release message.

    None for previews or items without chapter/volume info; a built message
    when a known tag is present; False when nothing matches.
    """
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    if (not (chp or vol)) or ('preview' in title.lower()):
        return None
    for tagname, name, tl_type in [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def bridge_to_supervisord(bridge: dm.BridgeDescription) -> str:
    """Render a supervisord [program] section for an active bridge.

    Inactive bridges produce an empty string. The /dev/fd/1 logfile with
    maxbytes=0 streams child output straight to supervisord's stdout.
    """
    if not bridge.active:
        return ''
    return f'''[program:{bridge.name}]
directory={bridge.directory}
command=/bin/bash -c "{bridge.command}"
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
redirect_stderr=true'''
def run_single(args, layers_size, batch_size):
    """Build a Net and optimizer on the requested device and run one training
    benchmark.

    Returns (elapsed_time, loss) from train(). Supported devices:
    'cpu', 'gpu' (CUDA) and 'tpu' (torch_xla).
    """
    device = args.device
    optimizer_type = args.optimizer_type
    data_type = args.dtype
    # Fixed seed so runs are comparable across devices.
    torch.manual_seed(1)
    lr = 0.01
    if (device == 'cpu'):
        dev = torch.device('cpu')
        model = Net(layers_size).to(dev)
        # NOTE(review): only the GPU branch supports 'adagrad'; the CPU and
        # TPU branches reject it — confirm whether that is intentional.
        if (optimizer_type == 'sgd'):
            optimizer = torch.optim.SGD(model.parameters(), lr=lr)
        else:
            assert 0, 'Unsupported optimizer type'
    elif (device == 'gpu'):
        assert torch.cuda.is_available(), 'cuda not available'
        dev = torch.device('cuda:0')
        model = Net(layers_size).to(dev)
        if (optimizer_type == 'sgd'):
            optimizer = torch.optim.SGD(model.parameters(), lr=lr)
        elif (optimizer_type == 'adagrad'):
            optimizer = torch.optim.Adagrad(model.parameters(), lr=lr)
        else:
            assert 0, 'Unsupported optimizer type'
    elif (device == 'tpu'):
        # Imported lazily so torch_xla is only required for TPU runs.
        import torch_xla.core.xla_model as xm
        dev = xm.xla_device()
        model = Net(layers_size).to(dev)
        if (optimizer_type == 'sgd'):
            optimizer = torch.optim.SGD(model.parameters(), lr=lr)
        else:
            assert 0, 'Unsupported optimizer type'
    # layers_size[0] / layers_size[-1] are the input and output widths.
    (elap, loss) = train(model, dev, optimizer, data_type, layers_size[0], layers_size[(- 1)], batch_size, args)
    return (elap, loss)
def update_face_swapper_model(face_swapper_model: FaceSwapperModel) -> gradio.Dropdown:
    """Activate *face_swapper_model*, align the recognizer model, and refresh the dropdown."""
    frame_processors_globals.face_swapper_model = face_swapper_model
    # Each swapper model needs a matching arcface recognizer backbone.
    recognizer_by_model = {
        'blendswap_256': 'arcface_blendswap',
        'inswapper_128': 'arcface_inswapper',
        'inswapper_128_fp16': 'arcface_inswapper',
        'simswap_256': 'arcface_simswap',
        'simswap_512_unofficial': 'arcface_simswap',
    }
    if face_swapper_model in recognizer_by_model:
        facefusion.globals.face_recognizer_model = recognizer_by_model[face_swapper_model]
    face_swapper_module = load_frame_processor_module('face_swapper')
    face_swapper_module.clear_frame_processor()
    face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model])
    # If the model assets cannot be verified/downloaded, return an empty dropdown.
    if not face_swapper_module.pre_check():
        return gradio.Dropdown()
    return gradio.Dropdown(value=face_swapper_model)
class EditorAreaMainWindowLayout(MainWindowLayout):
    """Main-window layout helper mapping advanced-editor-area panes to Qt dock widgets."""
    # Qt control is delegated to the editor area pane.
    control = DelegatesTo('editor_area')
    # The advanced editor area this layout operates on.
    editor_area = Instance(AdvancedEditorAreaPane)
    def _get_dock_widget(self, pane):
        """Return the dock widget containing the editor at index ``pane.id``, or None."""
        try:
            editor = self.editor_area.editors[pane.id]
            return editor.control.parent()
        except IndexError:
            # Stale/out-of-range pane id: no matching editor.
            return None
    def _get_pane(self, dock_widget):
        """Return a PaneItem for the editor wrapped by *dock_widget*, or None if not found."""
        for (i, editor) in enumerate(self.editor_area.editors):
            if (editor.control == dock_widget.widget()):
                return PaneItem(id=i)
        return None
class TestSignalWorkflow():
    """Workflow interface used by the signal tests; methods are stubs.

    NOTE(review): the original contained bare ``_method(task_list=TASK_LIST)``
    and ``_method`` expression statements — clearly decorators whose leading
    ``@workflow``/``@signal`` prefix was stripped. Restored below as the
    cadence-style @workflow_method/@signal_method pair — confirm the exact
    decorator names against this module's imports.
    """

    @workflow_method(task_list=TASK_LIST)
    async def get_greetings(self) -> list:
        raise NotImplementedError

    @signal_method
    async def wait_for_name(self, name: str):
        raise NotImplementedError

    @signal_method
    async def exit(self):
        raise NotImplementedError
class OptionPlotoptionsBellcurveSonificationDefaultinstrumentoptionsMappingLowpassResonance(Options):
    """Mapping options for the low-pass filter resonance of the default sonification instrument.

    NOTE(review): the original had duplicate method names (each getter
    immediately shadowed by its setter), which shows the @property and
    @<name>.setter decorators were stripped; they are restored here.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsXrangeSonificationDefaultinstrumentoptionsMappingTime(Options):
    """Time mapping options for the default sonification instrument of x-range series.

    NOTE(review): duplicate method names in the original (getter shadowed by
    setter) show that @property/@<name>.setter decorators were stripped;
    restored here.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def draw_multiple_line_text(image, text, font, text_color, padding, wrap=50, transparent=False) -> None:
    """Draw *text* word-wrapped and horizontally centred on *image* (in place).

    When *transparent* is true, a black outline is painted behind each line
    so the text stays readable over any background.
    """
    draw = ImageDraw.Draw(image)
    text_size = font.getsize(text)  # (width, height) of the unwrapped text
    image_width, image_height = image.size
    lines = textwrap.wrap(text, width=wrap)
    line_count = len(lines)
    # Vertically centre the whole wrapped block (per-line height + padding).
    block_height = (text_size[1] + (line_count * padding) / line_count) * line_count
    y = image_height / 2 - block_height / 2
    for line in lines:
        line_width, line_height = font.getsize(line)
        x = (image_width - line_width) / 2
        if transparent:
            # Paint shadow copies offset to the four diagonals, 1..4 px out.
            for offset in range(1, 5):
                for dx, dy in ((-offset, -offset), (offset, -offset), (-offset, offset), (offset, offset)):
                    draw.text((x + dx, y + dy), line, font=font, fill='black')
        draw.text((x, y), line, font=font, fill=text_color)
        y += line_height + padding
@pytest.mark.django_db
def test_different_matches_with_each_filter(client, monkeypatch, elasticsearch_award_index, multiple_subawards_with_tas):
    """Both subawards should match whether filtered by agency path or TAS components."""
    _setup_es(client, monkeypatch, elasticsearch_award_index)
    resp = query_by_treasury_account_components_subaward(client, {'require': [_agency_path(BASIC_TAS)]}, [component_dictionary(ATA_TAS)])
    # BUG FIX: list.sort() returns None, so the original compared None == None
    # and always passed. Compare sorted copies instead.
    by_id = lambda elem: elem['internal_id']
    assert sorted(resp.json()['results'], key=by_id) == sorted([_subaward1(), _subaward2()], key=by_id)
def demo_statistics(trace: Trace, rank: int, k: Optional[int]=None) -> pd.DataFrame:
    """Summarise kernel durations for one rank of *trace*.

    Returns the top *k* kernels by share of total kernel time, indexed by
    kernel name, with the remainder folded into an 'all_others' row.
    k=None keeps every kernel.
    """
    df = trace.get_trace(rank)
    sym_id_map = trace.symbol_table.get_sym_id_map()
    sym_table = trace.symbol_table.get_sym_table()
    # Select the rows categorised as 'Kernel' events.
    kernels = df[df['cat'] == sym_id_map['Kernel']]
    total_time = kernels['dur'].sum()
    stats = kernels.groupby(by='name')['dur'].agg(['sum', 'max', 'min', 'mean', 'std', 'count'])
    stats['percent'] = stats['sum'] / total_time * 100
    stats.reset_index(inplace=True)
    # Translate symbol ids back to readable kernel names.
    stats['name'] = stats['name'].apply(lambda sym_id: sym_table[sym_id])
    stats = stats.set_index('name', drop=True)
    if k is None:
        k = len(stats)
    ranked = stats.sort_values(by='percent', ascending=False)
    k = min(k, len(ranked))
    top_k = ranked[:k].copy()
    if k < len(ranked):
        # Aggregate everything below the cut into a single summary row.
        rest = ranked[k:]
        rest_sum = rest['sum'].sum()
        top_k.loc['all_others'] = [rest_sum, rest['max'].max(), rest['min'].min(), rest['mean'].mean(), rest['std'].mean(), rest['count'].mean(), rest_sum / total_time * 100]
    return top_k
class OptionPlotoptionsBubbleDragdropGuideboxDefault(Options):
    """Default guide-box style options for bubble-series drag & drop.

    NOTE(review): duplicate method names in the original (getter shadowed by
    setter) show that @property/@<name>.setter decorators were stripped;
    restored here. Getter defaults document the Highcharts defaults.
    """

    @property
    def className(self):
        return self._config_get('highcharts-drag-box-default')

    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)

    @property
    def color(self):
        return self._config_get('rgba(0, 0, 0, 0.1)')

    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)

    @property
    def cursor(self):
        return self._config_get('move')

    @cursor.setter
    def cursor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineColor(self):
        return self._config_get('#888')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        return self._config_get(1)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def zIndex(self):
        return self._config_get(900)

    @zIndex.setter
    def zIndex(self, num: float):
        self._config(num, js_type=False)
def fingerprint_package(package_dir: Path, package_type: Union[(str, PackageType)]) -> None:
    """Compute the fingerprint of the package at *package_dir* and write it back to its config file.

    :param package_dir: path to the package directory.
    :param package_type: package type (string or PackageType).
    :raises ValueError: if *package_dir* does not exist.
    """
    package_type = PackageType(package_type)
    item_type = str(package_type)
    # BUG FIX: validate the path before reading the configuration from it.
    # The original checked existence only AFTER the load, so the check was
    # unreachable — open_file would already have failed on a missing dir.
    if not package_dir.exists():
        raise ValueError('Package not found at path {}'.format(package_dir))
    default_config_file_name = _get_default_configuration_file_name_from_type(item_type)
    config_loader = ConfigLoader.from_configuration_type(item_type)
    config_file_path = Path(package_dir, default_config_file_name)
    config = config_loader.load(open_file(config_file_path))
    fingerprints_dict = _compute_fingerprint(package_dir, ignore_patterns=config.fingerprint_ignore_patterns)
    config.fingerprint = fingerprints_dict
    config_loader.dump(config, open_file(config_file_path, 'w'))
def test_event_get_thread_group1(session):
    """Delta data with a threadFbId key should resolve to a Group thread."""
    data = {
        'threadKey': {'threadFbId': 1234},
        'messageId': 'mid.$gAAT4Sw1WSGh14A3MOFvrsiDvr3Yc',
        'offlineThreadingId': '',
        'actorFbId': 4321,
        # NOTE(review): the original literal was `'timestamp': ,` — a syntax
        # error with the value lost. Any epoch-milliseconds integer restores
        # valid syntax; confirm the intended value against upstream history.
        'timestamp': 1500000000000,
        'tags': ['inbox', 'sent', 'tq', 'blindly_apply_message_folder', 'source:messenger:web'],
    }
    assert (Group(session=session, id='1234') == Event._get_thread(session, data))
class FourierTransform(object):
    """Abstract base class for Fourier transforms between two grids.

    Subclasses supply ``input_grid``/``output_grid`` and implement
    forward()/backward(); this base can materialise either transform as a
    dense matrix.
    """

    def forward(self, field):
        """Transform *field* from the input grid to the output grid."""
        raise NotImplementedError()

    def backward(self, field):
        """Transform *field* from the output grid back to the input grid."""
        raise NotImplementedError()

    def get_transformation_matrix_forward(self):
        """Return the dense matrix representing the forward transform."""
        coords_in = self.input_grid.as_('cartesian').coords
        coords_out = self.output_grid.as_('cartesian').coords
        matrix = np.exp(-1j * np.dot(np.array(coords_out).T, coords_in))
        # Quadrature weights of the input grid.
        matrix *= self.input_grid.weights
        return matrix

    def get_transformation_matrix_backward(self):
        """Return the dense matrix representing the backward (inverse) transform."""
        coords_in = self.input_grid.as_('cartesian').coords
        coords_out = self.output_grid.as_('cartesian').coords
        matrix = np.exp(1j * np.dot(np.array(coords_in).T, coords_out))
        matrix *= self.output_grid.weights
        # Fourier normalisation: 1/(2*pi)^ndim.
        matrix /= (2 * np.pi) ** self.input_grid.ndim
        return matrix
def generate_oura_sleep_header_chart(date, days=7, summary=False, resample='D'):
    """Build the Oura sleep-stage chart (figure + default clickData).

    :param date: lower bound for report dates when not in summary mode.
    :param days: number of most-recent rows kept in non-summary mode.
    :param summary: when True, query all history and use the summary layout
        with range selector/slider; otherwise a compact daily chart.
    :param resample: pandas resample rule applied to the daily data.
    :return: (plotly figure dict, clickData dict seeded with the latest point).
    """
    # Summary view uses a fixed 300px height; otherwise the shared chartHeight.
    height = (chartHeight if (not summary) else 300)
    if summary:
        df = pd.read_sql(sql=app.session.query(ouraSleepSummary).statement, con=engine, index_col='report_date')
    else:
        df = pd.read_sql(sql=app.session.query(ouraSleepSummary).filter((ouraSleepSummary.report_date > date)).statement, con=engine, index_col='report_date')[:days]
    daily_sleep_hr_target = app.session.query(athlete).filter((athlete.athlete_id == 1)).first().daily_sleep_hr_target
    app.session.remove()
    # Resample to the requested frequency (mean of each bucket).
    df = df.set_index(pd.to_datetime(df.index))
    df = df.resample(resample).mean()
    (buttons, range, tickformat) = modal_range_buttons(df=df, resample=resample)
    # Hover tooltips: sleep-stage seconds formatted as "Xh Ym" (and % of total).
    df['awake_tooltip'] = ['<b>Awake</b>: {:.0f}h {:.0f}m'.format(x, y) for (x, y) in zip((df['awake'] // 3600), ((df['awake'] % 3600) // 60))]
    df['rem_tooltip'] = ['<b>REM</b>: {:.0f}h {:.0f}m <b>{:.0f}%'.format(x, y, z) for (x, y, z) in zip((df['rem'] // 3600), ((df['rem'] % 3600) // 60), ((df['rem'] / df['total']) * 100))]
    df['light_tooltip'] = ['<b>Light</b>: {:.0f}h {:.0f}m <b>{:.0f}%'.format(x, y, z) for (x, y, z) in zip((df['light'] // 3600), ((df['light'] % 3600) // 60), ((df['light'] / df['total']) * 100))]
    df['deep_tooltip'] = ['<b>Deep</b>: {:.0f}h {:.0f}m <b>{:.0f}%'.format(x, y, z) for (x, y, z) in zip((df['deep'] // 3600), ((df['deep'] % 3600) // 60), ((df['deep'] / df['total']) * 100))]
    # Stacked area traces (minutes per stage) plus a dotted sleep-target line.
    full_chart = [go.Scatter(name='Deep', x=df.index, y=round((df['deep'] / 60)), mode='lines', text=df['deep_tooltip'], hoverinfo='text', opacity=0.7, line={'shape': 'spline', 'color': dark_blue}, fill='tonexty', fillcolor=dark_blue, stackgroup='one'), go.Scatter(name='Light', x=df.index, y=round((df['light'] / 60)), mode='lines', text=df['light_tooltip'], hoverinfo='text', opacity=0.7, line={'shape': 'spline', 'color': light_blue}, fill='tonexty', fillcolor=light_blue, stackgroup='one'), go.Scatter(name='REM', x=df.index, y=round((df['rem'] / 60)), mode='lines', text=df['rem_tooltip'], hoverinfo='text', opacity=0.7, line={'shape': 'spline', 'color': teal}, fill='tonexty', fillcolor=teal, stackgroup='one'), go.Scatter(name='Awake', x=df.index, y=round((df['awake'] / 60)), mode='lines', text=df['awake_tooltip'], hoverinfo='text', opacity=0.7, line={'shape': 'spline', 'color': white}, fill='tonexty', fillcolor=white, stackgroup='one'), go.Scatter(name='8hr target', x=df.index, y=[(daily_sleep_hr_target * 60) for x in df.index], mode='lines+text', text=[('{} hours'.format(daily_sleep_hr_target) if (x == df.index.max()) else '') for x in df.index], textfont=dict(size=11, color='rgb(150,150,150)'), textposition='bottom left', hoverinfo='none', line={'dash': 'dot', 'color': 'rgb(150,150,150)', 'width': 1}, showlegend=False)]
    # Same traces, but hoverinfo includes the x value for the summary view.
    summary_chart = [go.Scatter(name='Deep', x=df.index, y=round((df['deep'] / 60)), mode='lines', text=df['deep_tooltip'], hoverinfo='text+x', opacity=0.7, line={'shape': 'spline', 'color': dark_blue}, fill='tonexty', fillcolor=dark_blue, stackgroup='one'), go.Scatter(name='Light', x=df.index, y=round((df['light'] / 60)), mode='lines', text=df['light_tooltip'], hoverinfo='text+x', opacity=0.7, line={'shape': 'spline', 'color': light_blue}, fill='tonexty', fillcolor=light_blue, stackgroup='one'), go.Scatter(name='REM', x=df.index, y=round((df['rem'] / 60)), mode='lines', text=df['rem_tooltip'], hoverinfo='text+x', opacity=0.7, line={'shape': 'spline', 'color': teal}, fill='tonexty', fillcolor=teal, stackgroup='one'), go.Scatter(name='Awake', x=df.index, y=round((df['awake'] / 60)), mode='lines', text=df['awake_tooltip'], hoverinfo='text+x', opacity=0.7, line={'shape': 'spline', 'color': white}, fill='tonexty', fillcolor=white, stackgroup='one'), go.Scatter(name='8hr target', x=df.index, y=[(daily_sleep_hr_target * 60) for x in df.index], mode='lines+text', text=[('{} hours'.format(daily_sleep_hr_target) if (x == df.index.max()) else '') for x in df.index], textfont=dict(size=11, color='rgb(150,150,150)'), textposition='bottom left', hoverinfo='x', line={'dash': 'dot', 'color': 'rgb(150,150,150)', 'width': 1}, showlegend=False)]
    full_layout = go.Layout(height=height, transition=dict(duration=transition), font=dict(color=white, size=10), xaxis=dict(showline=True, color=white, showgrid=False, showticklabels=True, tickvals=df.index, tickformat='%a'), yaxis=dict(showgrid=False, showticklabels=False, gridcolor='rgb(73, 73, 73)', gridwidth=0.5), margin={'l': 0, 'b': 20, 't': 0, 'r': 0}, showlegend=False, legend=dict(x=0.5, y=(- 0.2), xanchor='center', orientation='h'), hovermode='x')
    summary_layout = go.Layout(height=height, transition=dict(duration=transition), font=dict(color=white, size=10), xaxis=dict(showline=True, color=white, showgrid=False, showticklabels=True, tickvals=df.index, tickformat='%b %d', range=range, rangeselector=dict(borderwidth=0.5, buttons=buttons, xanchor='center', x=0.5, y=0.97), rangeslider=dict(visible=True)), yaxis=dict(showgrid=False, showticklabels=False, gridcolor='rgb(73, 73, 73)', gridwidth=0.5), margin={'l': 0, 'b': 20, 't': 0, 'r': 0}, showlegend=False, hovermode='x')
    short_layout = go.Layout(height=height, transition=dict(duration=transition), font=dict(color=white, size=10), xaxis=dict(showline=True, color=white, showgrid=False, showticklabels=True, tickvals=df.index, tickformat='%a'), yaxis=dict(showgrid=False, showticklabels=False, gridcolor='rgb(73, 73, 73)', gridwidth=0.5), margin={'l': 0, 'b': 20, 't': 0, 'r': 0}, showlegend=False, legend=dict(x=0.5, y=(- 0.2), xanchor='center', orientation='h', font=dict(size=10, color=white)))
    # Bar variant used when there are too few points for an area chart.
    short_chart = [go.Bar(name='Deep', x=df.index, y=round((df['deep'] / 60)), text=df['deep_tooltip'], hoverinfo='text', marker={'color': dark_blue}), go.Bar(name='Light', x=df.index, y=round((df['light'] / 60)), text=df['light_tooltip'], hoverinfo='text', marker={'color': light_blue}), go.Bar(name='REM', x=df.index, y=round((df['rem'] / 60)), text=df['rem_tooltip'], hoverinfo='text', marker={'color': teal}), go.Bar(name='Awake', x=df.index, y=round((df['awake'] / 60)), text=df['awake_tooltip'], hoverinfo='text', marker={'color': white})]
    if summary:
        chart = summary_chart
        layout = summary_layout
    else:
        # 3 or fewer points: bars read better than splined areas.
        chart = (short_chart if (len(df) <= 3) else full_chart)
        layout = (short_layout if (len(df) <= 3) else full_layout)
    figure = {'data': chart, 'layout': layout}
    # Pre-seed clickData with the most recent day's values.
    clickData = {'points': [{'x': df.index.max(), 'y': df['deep'].max()}, {'y': df['light'].max()}, {'y': df['rem'].max()}, {'y': df['awake'].max()}, {'y': daily_sleep_hr_target}]}
    return (figure, clickData)
def addpattern(base_func, *add_funcs, **kwargs):
    """Attach additional pattern-matching cases to *base_func*.

    With extra functions given, combines them immediately; with none, returns
    a partial usable as a decorator. Warns when *base_func* does not look
    like a pattern-matching function unless allow_any_func=True is passed.
    """
    allow_any_func = kwargs.pop('allow_any_func', False)
    is_match_func = _coconut.getattr(base_func, '_coconut_is_match', False)
    if not (allow_any_func or is_match_func):
        _coconut.warnings.warn((('Possible misuse of addpattern with non-pattern-matching function ' + _coconut.repr(base_func)) + ' (pass allow_any_func=True to dismiss)'), stacklevel=2)
    # Any remaining keyword arguments are unsupported.
    if kwargs:
        raise _coconut.TypeError(('addpattern() got unexpected keyword arguments ' + _coconut.repr(kwargs)))
    if not add_funcs:
        return _coconut_partial(_coconut_base_pattern_func, base_func)
    return _coconut_base_pattern_func(base_func, *add_funcs)
class Migration(migrations.Migration):
    """Point the Setting file/image fields at the upload_to callables in extra_settings.fields."""
    dependencies = [('extra_settings', '0002_auto__1714')]
    # NOTE(review): value_image uses FileField (not ImageField) — presumably
    # intentional to avoid the Pillow dependency; confirm against the model.
    operations = [migrations.AlterField(model_name='setting', name='value_file', field=models.FileField(blank=True, upload_to=extra_settings.fields.upload_to_files, verbose_name='Value')), migrations.AlterField(model_name='setting', name='value_image', field=models.FileField(blank=True, upload_to=extra_settings.fields.upload_to_images, verbose_name='Value'))]
def push_login(core):
    """Attempt a push (QR-less) login using the cached 'wxuin' cookie.

    Returns the login uuid on success, False otherwise.
    """
    cookies = core.s.cookies.get_dict()
    if 'wxuin' not in cookies:
        return False
    url = ('%s/cgi-bin/mmwebwx-bin/webwxpushloginurl?uin=%s' % (config.BASE_URL, cookies['wxuin']))
    resp = core.s.get(url, headers={'User-Agent': core.user_agent})
    try:
        payload = resp.json()
    except Exception:
        logger.error(f'Login info token is not a valid JSON: {resp.content}')
        return False
    # A usable response carries a uuid and a success return code.
    if 'uuid' in payload and payload.get('ret') in (0, '0'):
        core.uuid = payload['uuid']
        return payload['uuid']
    return False
def _get_destination_info(connection):
destination_info = {'service': {'name': '', 'resource': 'redis', 'type': ''}}
if hasattr(connection, '_pool_or_conn'):
destination_info['port'] = int(connection._pool_or_conn.address[1])
destination_info['address'] = connection._pool_or_conn.address[0]
else:
destination_info['port'] = int(connection.address[1])
destination_info['address'] = connection.address[0]
return destination_info |
def test_filter_is_applied_to_search_but_not_relevant_facet():
    """The category filter must become the post_filter and constrain other
    facets' filter aggs, but must not restrict the category facet itself."""
    search = BlogSearch('python search', filters={'category': 'elastic'}).build_search()
    expected = {
        'aggs': {
            '_filter_tags': {
                'filter': {'terms': {'category.raw': ['elastic']}},
                'aggs': {'tags': {'terms': {'field': 'tags'}}},
            },
            '_filter_category': {
                'filter': {'match_all': {}},
                'aggs': {'category': {'terms': {'field': 'category.raw'}}},
            },
        },
        'post_filter': {'terms': {'category.raw': ['elastic']}},
        'query': {'multi_match': {'fields': ('title^5', 'body'), 'query': 'python search'}},
        'highlight': {'fields': {'body': {}, 'title': {}}},
    }
    assert expected == search.to_dict()
class Schedule(_common.FlyteIdlEntity):
    """Python model of the flyteidl Schedule message.

    NOTE(review): the original's duplicate-free accessor methods were being
    read attribute-style (``self.value``, ``self.rate`` in to_flyte_idl),
    which shows @property and @classmethod decorators were stripped from
    this block; they are restored here.
    """

    class FixedRateUnit(object):
        """Enum wrapper for the fixed-rate time unit."""
        MINUTE = _schedule_pb2.MINUTE
        HOUR = _schedule_pb2.HOUR
        DAY = _schedule_pb2.DAY

        @classmethod
        def enum_to_string(cls, int_value):
            """Return the human-readable name for a FixedRateUnit value."""
            if (int_value == cls.MINUTE):
                return 'MINUTE'
            elif (int_value == cls.HOUR):
                return 'HOUR'
            elif (int_value == cls.DAY):
                return 'DAY'
            else:
                # Unknown value: fall back to its string representation.
                return '{}'.format(int_value)

    class FixedRate(_common.FlyteIdlEntity):
        """A fixed-rate schedule (e.g. every N minutes/hours/days)."""

        def __init__(self, value, unit):
            self._value = value
            self._unit = unit

        @property
        def value(self):
            """Rate multiplier (int)."""
            return self._value

        @property
        def unit(self):
            """A FixedRateUnit enum value."""
            return self._unit

        def to_flyte_idl(self):
            """Serialize to the protobuf FixedRate message."""
            return _schedule_pb2.FixedRate(value=self.value, unit=self.unit)

        @classmethod
        def from_flyte_idl(cls, pb2_object):
            """Deserialize from the protobuf FixedRate message."""
            return cls(pb2_object.value, pb2_object.unit)

    class CronSchedule(_common.FlyteIdlEntity):
        """A cron-expression schedule with an optional offset."""

        def __init__(self, schedule, offset):
            self._schedule = schedule
            self._offset = offset

        @property
        def schedule(self):
            """Cron expression or alias (str)."""
            return self._schedule

        @property
        def offset(self):
            """ISO-8601 duration offset, or None."""
            return self._offset

        def to_flyte_idl(self):
            """Serialize to the protobuf CronSchedule message."""
            return _schedule_pb2.CronSchedule(schedule=self.schedule, offset=self.offset)

        @classmethod
        def from_flyte_idl(cls, pb2_object):
            """Deserialize from the protobuf CronSchedule message; empty strings become None."""
            return cls((pb2_object.schedule or None), (pb2_object.offset or None))

    def __init__(self, kickoff_time_input_arg, cron_expression=None, rate=None, cron_schedule=None):
        """Exactly one of cron_expression, rate or cron_schedule should be set."""
        self._kickoff_time_input_arg = kickoff_time_input_arg
        self._cron_expression = cron_expression
        self._rate = rate
        self._cron_schedule = cron_schedule

    @property
    def kickoff_time_input_arg(self):
        return self._kickoff_time_input_arg

    @property
    def cron_expression(self):
        return self._cron_expression

    @property
    def rate(self):
        return self._rate

    @property
    def cron_schedule(self):
        return self._cron_schedule

    @property
    def schedule_expression(self):
        """Whichever of cron_expression / rate / cron_schedule is set."""
        return (self.cron_expression or self.rate or self.cron_schedule)

    def to_flyte_idl(self):
        """Serialize to the protobuf Schedule message."""
        return _schedule_pb2.Schedule(kickoff_time_input_arg=self.kickoff_time_input_arg, cron_expression=self.cron_expression, rate=(self.rate.to_flyte_idl() if (self.rate is not None) else None), cron_schedule=(self.cron_schedule.to_flyte_idl() if (self.cron_schedule is not None) else None))

    @classmethod
    def from_flyte_idl(cls, pb2_object):
        """Deserialize from the protobuf Schedule message."""
        return Schedule(pb2_object.kickoff_time_input_arg, cron_expression=(pb2_object.cron_expression if pb2_object.HasField('cron_expression') else None), rate=(Schedule.FixedRate.from_flyte_idl(pb2_object.rate) if pb2_object.HasField('rate') else None), cron_schedule=(Schedule.CronSchedule.from_flyte_idl(pb2_object.cron_schedule) if pb2_object.HasField('cron_schedule') else None))
class StaticServer(BaseServer):
    """Test double for a static-root server backed by a mock socket listener."""

    def __init__(self, address, port, retries, timeout, root, stats_callback, stats_interval, network_queue):
        super().__init__(address, port, retries, timeout, stats_callback, stats_interval)
        self._root = root
        self._listener = MockSocketListener(network_queue)
        self._handler = None

    def get_handler(self, addr, peer, path, options):
        """Create, remember, and return a mock request handler for this request."""
        handler = Mock(addr, peer, path, options)
        # Mirror the request parameters onto the mock for later inspection.
        handler.addr = addr
        handler.peer = peer
        handler.path = path
        handler.options = options
        handler.start = Mock()
        self._handler = handler
        return handler
class Map():
    """Renderer that draws a constructed OSM map onto an SVG drawing."""
    def __init__(self, flinger: Flinger, svg: svgwrite.Drawing, configuration: MapConfiguration) -> None:
        """Store the projection, target SVG, scheme and configuration."""
        self.flinger: Flinger = flinger
        self.svg: svgwrite.Drawing = svg
        self.scheme: Scheme = configuration.scheme
        self.configuration = configuration
        self.background_color: Color = self.scheme.get_color('background_color')
        # An explicit configuration background overrides the scheme default.
        if (color := self.configuration.background_color()):
            self.background_color = color
    def draw(self, constructor: Constructor) -> None:
        """Draw the whole map: background, ways, roads, buildings, nodes, credits."""
        self.svg.add(Rect((0.0, 0.0), self.flinger.size, fill=self.background_color))
        logging.info('Drawing ways...')
        # Roads are drawn between low-priority and high-priority figures.
        figures: list[StyledFigure] = constructor.get_sorted_figures()
        top_figures: list[StyledFigure] = [x for x in figures if (x.line_style.priority >= ROAD_PRIORITY)]
        bottom_figures: list[StyledFigure] = [x for x in figures if (x.line_style.priority < ROAD_PRIORITY)]
        for figure in bottom_figures:
            path_commands: str = figure.get_path(self.flinger)
            if path_commands:
                path: SVGPath = SVGPath(d=path_commands)
                path.update(figure.line_style.style)
                self.svg.add(path)
        constructor.roads.draw(self.svg, self.flinger)
        for figure in top_figures:
            path_commands: str = figure.get_path(self.flinger)
            if path_commands:
                path: SVGPath = SVGPath(d=path_commands)
                path.update(figure.line_style.style)
                self.svg.add(path)
        if self.scheme.draw_trees:
            for tree in constructor.trees:
                tree.draw(self.svg, self.flinger, self.scheme)
        if self.scheme.draw_craters:
            for crater in constructor.craters:
                crater.draw(self.svg, self.flinger)
        if self.scheme.draw_buildings:
            self.draw_buildings(constructor, self.configuration.use_building_colors)
        if self.scheme.draw_directions:
            for direction_sector in constructor.direction_sectors:
                direction_sector.draw(self.svg, self.scheme)
        if self.scheme.draw_nodes:
            # Optional occupancy grid prevents icon/label overlap.
            occupied: Optional[Occupied]
            if (self.configuration.overlap == 0):
                occupied = None
            else:
                occupied = Occupied(self.flinger.size[0], self.flinger.size[1], self.configuration.overlap)
            # Draw highest-priority points first so they claim space.
            nodes: list[Point] = sorted(constructor.points, key=(lambda x: (- x.priority)))
            logging.info('Drawing main icons...')
            for node in nodes:
                node.draw_main_shapes(self.svg, occupied)
            logging.info('Drawing extra icons...')
            for point in nodes:
                point.draw_extra_shapes(self.svg, occupied)
            logging.info('Drawing texts...')
            for point in nodes:
                if ((not self.configuration.is_wireframe()) and (self.configuration.label_mode != LabelMode.NO)):
                    point.draw_texts(self.svg, occupied, self.configuration.label_mode)
        if self.configuration.show_credit:
            self.draw_credits(constructor.flinger.size)
    def draw_buildings(self, constructor: Constructor, use_building_colors: bool) -> None:
        """Draw buildings: either flat footprints or isometric walls and roofs."""
        if (self.configuration.building_mode == BuildingMode.NO):
            return
        if (self.configuration.building_mode == BuildingMode.FLAT):
            for building in constructor.buildings:
                building.draw(self.svg, self.flinger, use_building_colors)
            return
        logging.info('Drawing isometric buildings...')
        scale: float = self.flinger.get_scale()
        # Faint combined ground shadow under all buildings.
        building_shade: Group = Group(opacity=0.1)
        for building in constructor.buildings:
            building.draw_shade(building_shade, self.flinger)
        self.svg.add(building_shade)
        walls: dict[(Segment, Building)] = {}
        for building in constructor.buildings:
            for part in building.parts:
                walls[part] = building
        sorted_walls = sorted(walls.keys())
        # Extrude walls in height slices so nearer walls cover farther ones.
        previous_height: float = 0.0
        for height in sorted(constructor.heights):
            shift_1: np.ndarray = np.array((0.0, (((- previous_height) * scale) * BUILDING_SCALE)))
            shift_2: np.ndarray = np.array((0.0, (((- height) * scale) * BUILDING_SCALE)))
            for wall in sorted_walls:
                building: Building = walls[wall]
                # Skip walls outside the [min_height, height) slice.
                if ((building.height < height) or (building.min_height >= height)):
                    continue
                draw_walls(self.svg, building, wall, height, shift_1, shift_2, use_building_colors)
            if self.configuration.draw_roofs:
                for building in constructor.buildings:
                    if (building.height == height):
                        building.draw_roof(self.svg, self.flinger, scale, use_building_colors)
            previous_height = height
    def draw_simple_roads(self, roads: Iterator[Road]) -> None:
        """Draw intersections for road networks (nodes shared by 4+ road parts)."""
        nodes: dict[(OSMNode, set[RoadPart])] = {}
        for road in roads:
            for index in range((len(road.nodes) - 1)):
                node_1: OSMNode = road.nodes[index]
                node_2: OSMNode = road.nodes[(index + 1)]
                point_1: np.ndarray = self.flinger.fling(node_1.coordinates)
                point_2: np.ndarray = self.flinger.fling(node_2.coordinates)
                scale: float = self.flinger.get_scale(node_1.coordinates)
                # Each segment contributes a directed part to both endpoints.
                part_1: RoadPart = RoadPart(point_1, point_2, road.lanes, scale)
                part_2: RoadPart = RoadPart(point_2, point_1, road.lanes, scale)
                for node in (node_1, node_2):
                    if (node not in nodes):
                        nodes[node] = set()
                nodes[node_1].add(part_1)
                nodes[node_2].add(part_2)
        for (node, parts) in nodes.items():
            if (len(parts) < 4):
                continue
            intersection: Intersection = Intersection(list(parts))
            intersection.draw(self.svg, True)
    def draw_credits(self, size: np.ndarray):
        """Draw rendering/data credit lines in the bottom-right corner."""
        right_margin: float = 15.0
        bottom_margin: float = 15.0
        font_size: float = 10.0
        vertical_spacing: float = 2.0
        text_color: Color = Color('#888888')
        outline_color: Color = Color('#FFFFFF')
        credit_list: list[tuple[(str, tuple[(float, float)])]] = [(f'Rendering: {__project__}', (right_margin, bottom_margin))]
        if self.configuration.credit:
            data_credit: tuple[(str, tuple[(float, float)])] = (f'Data: {self.configuration.credit}', (right_margin, ((bottom_margin + font_size) + vertical_spacing)))
            credit_list.append(data_credit)
        for (text, point) in credit_list:
            # Each line is drawn twice: white outline first, then the text.
            for (stroke_width, stroke, opacity) in ((3.0, outline_color, 0.7), (1.0, None, 1.0)):
                draw_text(self.svg, text, (size - np.array(point)), font_size, text_color, anchor='end', stroke_width=stroke_width, stroke=stroke, opacity=opacity)
def test_stable_cache_key():
    """The cache key must be stable for a fixed task name, version, and literal map."""
    python_type = Dict
    literal_type = TypeEngine.to_literal_type(python_type)
    ctx = FlyteContextManager.current_context()
    # Nested payload mixing ints, strings, floats, lists, and dicts.
    kwargs = {'a': 42, 'b': 'abcd', 'c': 0.12349, 'd': [1, 2, 3], 'e': {'e_a': 11, 'e_b': list(range(1000)), 'e_c': {'e_c_a': 12.34, 'e_c_b': 'a string'}}}
    dict_literal = TypeEngine.to_literal(ctx, kwargs, Dict, literal_type)
    literal_map = LiteralMap(literals={
        'lit_1': dict_literal,
        'lit_2': TypeEngine.to_literal(ctx, 99, int, LiteralType(simple=SimpleType.INTEGER)),
        'lit_3': TypeEngine.to_literal(ctx, 3.14, float, LiteralType(simple=SimpleType.FLOAT)),
        'lit_4': TypeEngine.to_literal(ctx, True, bool, LiteralType(simple=SimpleType.BOOLEAN)),
    })
    cache_key = _calculate_cache_key('task_name_1', '31415', literal_map)
    assert (cache_key == 'task_name_1-31415-404b45fd4bf37f50049')
class TestSudoCommandUsePty():
    """Tests for the sudo log-file audit check.

    NOTE(review): the bare ``(CISAudit, '_shellexec', mock_...)`` tuple
    expressions before each test were stripped patch decorators; restored
    below as @mock.patch.object — confirm the mock import style used by
    this test module (``mock`` vs ``unittest.mock``).
    """
    test = CISAudit()
    test_id = '1.1'

    @mock.patch.object(CISAudit, '_shellexec', mock_sudo_log_exists_pass)
    def test_sudo_log_exists_pass(self):
        # 0 == audit passed.
        state = self.test.audit_sudo_log_exists()
        assert (state == 0)

    @mock.patch.object(CISAudit, '_shellexec', mock_sudo_log_exists_fail)
    def test_sudo_log_exists_fail(self):
        # 1 == audit failed.
        state = self.test.audit_sudo_log_exists()
        assert (state == 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.