code stringlengths 281 23.7M |
|---|
# NOTE(review): the decorator line was mangled in this copy
# ("_baseline_registry.register_il_policy" is not a statement); restored as a
# registry decorator — confirm the exact registry object name against the repo.
@baseline_registry.register_il_policy
class ObjectNavILPolicy(ILPolicy):
    """Imitation-learning policy for ObjectNav, wrapping ObjectNavILNet."""

    def __init__(self, observation_space: Space, action_space: Space, backbone_config: Config, model_config: Config, run_type: str):
        """Build the underlying ObjectNavILNet and hand it to ILPolicy.

        :param observation_space: env observation space forwarded to the net
        :param action_space: discrete action space; only ``.n`` is used
        :param backbone_config: visual-backbone section of the config
        :param model_config: model section of the config
        :param run_type: run mode string (e.g. train/eval) forwarded to the net
        """
        super().__init__(ObjectNavILNet(observation_space=observation_space, model_config=model_config, backbone_config=backbone_config, num_actions=action_space.n, run_type=run_type), action_space.n)

    @classmethod  # FIX: method uses `cls` and is a factory — must be a classmethod
    def from_config(cls, config: Config, observation_space, action_space):
        """Construct the policy from a config object.

        NOTE(review): reads both ``config.model`` and ``config.MODEL`` —
        looks inconsistent; verify which attribute the config actually exposes.
        """
        return cls(observation_space=observation_space, action_space=action_space, backbone_config=config.model, model_config=config.MODEL, run_type=config.RUN_TYPE)
class TestWriteProtectionCommands(EfuseTestCase):
    """Tests for the espefuse `write_protect_efuse` command across chip targets."""

    def test_write_protect_efuse(self):
        """Write-protect a chip-specific list of efuses, then re-run on a
        subset and check the 'already write protected' warning count."""
        self.espefuse_py('write_protect_efuse -h')
        # Each chip family exposes a different set of write-protectable efuses.
        if (arg_chip == 'esp32'):
            efuse_lists = 'WR_DIS RD_DIS CODING_SCHEME\n XPD_SDIO_FORCE XPD_SDIO_REG XPD_SDIO_TIEH SPI_PAD_CONFIG_CLK\n FLASH_CRYPT_CNT UART_DOWNLOAD_DIS FLASH_CRYPT_CONFIG\n ADC_VREF BLOCK1 BLOCK2 BLOCK3'
            efuse_lists2 = 'WR_DIS RD_DIS'
        elif (arg_chip == 'esp32c2'):
            efuse_lists = 'RD_DIS DIS_DOWNLOAD_ICACHE\n XTS_KEY_LENGTH_256 UART_PRINT_CONTROL'
            efuse_lists2 = 'RD_DIS DIS_DOWNLOAD_ICACHE'
        elif (arg_chip == 'esp32p4'):
            efuse_lists = 'RD_DIS KEY_PURPOSE_0 SECURE_BOOT_KEY_REVOKE0\n SPI_BOOT_CRYPT_CNT'
            efuse_lists2 = 'RD_DIS KEY_PURPOSE_0 KEY_PURPOSE_2'
        else:
            # Generic list for the remaining chips (esp32s2/s3/c3/c6/h2...).
            efuse_lists = 'RD_DIS DIS_ICACHE DIS_FORCE_DOWNLOAD\n DIS_CAN SOFT_DIS_JTAG DIS_DOWNLOAD_MANUAL_ENCRYPT\n USB_EXCHG_PINS WDT_DELAY_SEL SPI_BOOT_CRYPT_CNT\n SECURE_BOOT_KEY_REVOKE0 SECURE_BOOT_KEY_REVOKE1\n SECURE_BOOT_KEY_REVOKE2 KEY_PURPOSE_0 KEY_PURPOSE_1\n KEY_PURPOSE_2 KEY_PURPOSE_3 KEY_PURPOSE_4 KEY_PURPOSE_5\n SECURE_BOOT_EN SECURE_BOOT_AGGRESSIVE_REVOKE FLASH_TPUW\n DIS_DOWNLOAD_MODE\n ENABLE_SECURITY_DOWNLOAD UART_PRINT_CONTROL\n MAC OPTIONAL_UNIQUE_ID\n BLOCK_USR_DATA BLOCK_KEY0 BLOCK_KEY1\n BLOCK_KEY2 BLOCK_KEY3 BLOCK_KEY4 BLOCK_KEY5'
            # h2/c6 variants lack the SPI pad-config efuses.
            if ((arg_chip not in ['esp32h2', 'esp32h2beta1']) and (arg_chip not in ['esp32c6'])):
                efuse_lists += ' DIS_DOWNLOAD_ICACHE\n SPI_PAD_CONFIG_CLK SPI_PAD_CONFIG_Q\n SPI_PAD_CONFIG_D SPI_PAD_CONFIG_CS SPI_PAD_CONFIG_HD\n SPI_PAD_CONFIG_WP SPI_PAD_CONFIG_DQS SPI_PAD_CONFIG_D4\n SPI_PAD_CONFIG_D5 SPI_PAD_CONFIG_D6 SPI_PAD_CONFIG_D7'
            efuse_lists2 = 'RD_DIS DIS_ICACHE'
        self.espefuse_py(f'write_protect_efuse {efuse_lists}')
        # Second run: both efuses in efuse_lists2 are already protected.
        output = self.espefuse_py(f'write_protect_efuse {efuse_lists2}')
        assert (output.count('is already write protected') == 2)

    def test_write_protect_efuse2(self):
        """On ESP32, protecting WR_DIS first makes CODING_SCHEME unprotectable."""
        if (arg_chip == 'esp32'):
            self.espefuse_py('write_protect_efuse WR_DIS')
            self.espefuse_py('write_protect_efuse CODING_SCHEME', check_msg='A fatal error occurred: This efuse cannot be write-disabled due to the WR_DIS field is already write-disabled', ret_code=2)
def extractAbodammenWpcomstagingCom(item):
    """Build a release message for an abodammen.wpcomstaging.com feed item.

    Returns None for previews/unparseable titles, a release message when a
    known group tag matches, and False otherwise.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip items with no volume/chapter info and preview posts.
    if 'preview' in title.lower() or not (vol or chp):
        return None
    known_groups = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, group_name, release_type in known_groups:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, group_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_type)
    return False
# FIX: the marker line was mangled to ".django_db" (a syntax error);
# restored the standard pytest-django marker.
@pytest.mark.django_db
def test_failure_with_invalid_filters(client, monkeypatch, elasticsearch_award_index):
    """spending_by_award returns 422 with a specific message for each
    missing/invalid request field."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)

    # Empty payload: 'fields' missing.
    resp = client.post('/api/v2/search/spending_by_award', content_type='application/json', data=json.dumps({}))
    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    assert resp.json().get('detail') == "Missing value: 'fields' is a required field"

    # Filters present but no award_type_codes.
    resp = client.post('/api/v2/search/spending_by_award', content_type='application/json', data=json.dumps({'fields': [], 'filters': {}, 'page': 1, 'limit': 60, 'subawards': False}))
    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    assert resp.json().get('detail') == "Missing value: 'filters|award_type_codes' is a required field"

    # Valid filters but empty 'fields' list (min 1 item).
    resp = client.post('/api/v2/search/spending_by_award', content_type='application/json', data=json.dumps({'fields': [], 'filters': {'time_period': [{'start_date': '2007-10-01', 'end_date': '2020-09-30'}], 'award_type_codes': ['A', 'B', 'C', 'D']}, 'page': 1, 'limit': 60, 'subawards': False}))
    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    assert resp.json().get('detail') == "Field 'fields' value '[]' is below min '1' items"
def map_collection(func: Callable[..., TReturn], collection: Any) -> Any:
    """Apply *func* over a collection, rebuilding the same concrete type.

    Mappings map over values (keys untouched), strings pass through
    unchanged, other iterables map over elements; anything else is
    returned as-is.
    """
    collection_type = type(collection)
    if isinstance(collection, Mapping):
        return collection_type((key, func(value)) for key, value in collection.items())
    if is_string(collection):
        # Strings are iterable but should not be element-mapped.
        return collection
    if isinstance(collection, Iterable):
        return collection_type(map(func, collection))
    return collection
class BlameHunk(object):
    """A hunk of a blame: a run of lines attributed to one commit.

    FIX: this copy had lost its decorators — ``_from_c`` uses ``cls`` and
    must be a classmethod, and the accessors are attribute-style properties
    (pygit2 convention); without ``@property`` they would return bound
    methods instead of values.
    """

    @classmethod
    def _from_c(cls, blame, ptr):
        """Wrap a C-level hunk pointer without running __init__.

        :param blame: owning Blame object (kept alive via the reference)
        :param ptr: cffi pointer to the underlying hunk struct
        """
        hunk = cls.__new__(cls)
        hunk._blame = blame
        hunk._hunk = ptr
        return hunk

    @property
    def lines_in_hunk(self):
        """Number of lines in this hunk."""
        return self._hunk.lines_in_hunk

    @property
    def boundary(self):
        """True if the hunk's commit is a boundary (root) commit."""
        return (int(ffi.cast('int', self._hunk.boundary)) != 0)

    @property
    def final_start_line_number(self):
        """1-based start line of the hunk in the final version of the file."""
        return self._hunk.final_start_line_number

    @property
    def final_committer(self):
        """Signature of the final version's committer."""
        return wrap_signature(self._hunk.final_signature)

    @property
    def final_commit_id(self):
        """Oid of the commit where the line was last changed."""
        return Oid(raw=bytes(ffi.buffer(ffi.addressof(self._hunk, 'final_commit_id'))[:]))

    @property
    def orig_start_line_number(self):
        """1-based start line of the hunk in ``orig_commit_id``'s version."""
        return self._hunk.orig_start_line_number

    @property
    def orig_committer(self):
        """Signature of the original committer."""
        return wrap_signature(self._hunk.orig_signature)

    @property
    def orig_commit_id(self):
        """Oid of the commit the hunk was originally introduced in."""
        return Oid(raw=bytes(ffi.buffer(ffi.addressof(self._hunk, 'orig_commit_id'))[:]))

    @property
    def orig_path(self):
        """Original path of the file, or None if not available."""
        path = self._hunk.orig_path
        if (not path):
            return None
        return ffi.string(path).decode()
class FillingParser(RewritingParser):
    """HTML parser that rewrites a form, filling in default values and errors.

    Walks the markup via ``handle_*`` callbacks, rewriting <input>,
    <textarea>, <select>/<option> and the special <form:error> /
    <form:iferror> tags.  Tracks which defaults (``used_keys``) and errors
    (``used_errors``) were consumed so unused ones can be reported or
    auto-inserted in :meth:`close`.
    """

    # Encoding used for mixed str/bytes comparisons when none is configured.
    default_encoding = 'utf8'
    # <input type=...> values treated as plain text-like inputs.
    text_input_types = set('text hidden search tel url email datetime date month week time datetime-local number range color'.split())

    def __init__(self, defaults, errors=None, use_all_keys=False, error_formatters=None, error_class='error', add_attributes=None, listener=None, auto_error_formatter=None, text_as_default=False, checkbox_checked_if_present=False, encoding=None, prefix_error=True, force_defaults=True, skip_passwords=False, data_formencode_form=None, data_formencode_ignore=None):
        """Store filling configuration and reset all per-parse state.

        :param defaults: mapping of field name -> default value to fill in
        :param errors: mapping of field name -> error, or a single string
            treated as one unnamed (None-keyed) error
        :param use_all_keys: assert that every default was used by the form
        :param error_formatters: name -> callable formatting an error string
        :param add_attributes: field name -> extra attributes; a '+'-prefixed
            attribute name appends to the existing value
        :param prefix_error: place error markers before (True) or after
            (False) the field
        :param force_defaults: overwrite existing field values even when the
            default is missing/None
        """
        RewritingParser.__init__(self)
        self.source = None
        self.lines = None
        self.source_pos = None
        self.defaults = defaults
        self.in_textarea = None
        self.skip_textarea = False
        self.last_textarea_name = None
        self.in_select = None
        self.skip_next = False
        self.errors = (errors or {})
        # A bare string means a single global (unnamed) error.
        if isinstance(self.errors, str):
            self.errors = {None: self.errors}
        self.in_error = None
        self.skip_error = False
        self.use_all_keys = use_all_keys
        self.used_keys = set()
        self.used_errors = set()
        if (error_formatters is None):
            self.error_formatters = default_formatter_dict
        else:
            self.error_formatters = error_formatters
        self.error_class = error_class
        self.add_attributes = (add_attributes or {})
        self.listener = listener
        self.auto_error_formatter = auto_error_formatter
        self.text_as_default = text_as_default
        self.checkbox_checked_if_present = checkbox_checked_if_present
        self.encoding = encoding
        self.prefix_error = prefix_error
        self.force_defaults = force_defaults
        self.skip_passwords = skip_passwords
        self.data_formencode_form = data_formencode_form
        self.data_formencode_ignore = data_formencode_ignore

    def str_compare(self, str1, str2):
        """Compare two values as strings, encoding one side if types differ."""
        if (not isinstance(str1, str)):
            str1 = str(str1)
        if (type(str1) is type(str2)):
            return (str1 == str2)
        # Mixed str/bytes: encode the str side with the configured encoding.
        if isinstance(str1, str):
            str1 = str1.encode((self.encoding or self.default_encoding))
        else:
            str2 = str2.encode((self.encoding or self.default_encoding))
        return (str1 == str2)

    def close(self):
        """Finish parsing: report/insert unused errors, optionally assert
        that all defaults were used, and assemble the output text."""
        self.handle_misc(None)
        RewritingParser.close(self)
        # Errors that no <form:error>/<form:iferror> tag consumed.
        unused_errors = self.errors.copy()
        for key in self.used_errors:
            if (key in unused_errors):
                del unused_errors[key]
        if self.auto_error_formatter:
            # Insert leftover errors at their field markers instead of failing.
            for (key, value) in unused_errors.items():
                error_message = self.auto_error_formatter(value)
                error_message = ('<!-- for: %s -->\n%s' % (key, error_message))
                self.insert_at_marker(key, error_message)
            unused_errors = {}
        if self.use_all_keys:
            unused = self.defaults.copy()
            for key in self.used_keys:
                if (key in unused):
                    del unused[key]
            assert (not unused), ('These keys from defaults were not used in the form: %s' % ', '.join(unused))
        if unused_errors:
            error_text = [('%s: %s' % (key, self.errors[key])) for key in sorted(unused_errors)]
            assert False, ('These errors were not used in the form: %s' % ', '.join(error_text))
        if (self.encoding is not None):
            # NOTE(review): this loop copies _content unchanged — presumably a
            # remnant of per-item encoding; confirm against upstream formencode.
            new_content = []
            for item in self._content:
                new_content.append(item)
            self._content = new_content
        self._text = self._get_text()

    def skip_output(self):
        """True while output should be suppressed (skipped textarea/iferror)."""
        return ((self.in_textarea and self.skip_textarea) or self.skip_error)

    def add_key(self, key):
        """Record that a default key was consumed by the form."""
        self.used_keys.add(key)

    def handle_starttag(self, tag, attrs, startend=False):
        """Dispatch a start tag to the appropriate form-filling handler."""
        self.write_pos()
        # Scope filling to one form when data-formencode-form is configured.
        if self.data_formencode_form:
            for a in attrs:
                if (a[0] == 'data-formencode-form'):
                    if (a[1] != self.data_formencode_form):
                        return
        # Fields marked data-formencode-ignore are left untouched.
        if self.data_formencode_ignore:
            for a in attrs:
                if (a[0] == 'data-formencode-ignore'):
                    return
        if (tag == 'input'):
            self.handle_input(attrs, startend)
        elif (tag == 'textarea'):
            self.handle_textarea(attrs)
        elif (tag == 'select'):
            self.handle_select(attrs)
        elif (tag == 'option'):
            self.handle_option(attrs)
            return
        elif (tag == 'form:error'):
            self.handle_error(attrs)
            return
        elif (tag == 'form:iferror'):
            self.handle_iferror(attrs)
            return
        else:
            return
        # Only input/textarea/select reach the listener hook.
        if self.listener:
            self.listener.listen_input(self, tag, attrs)

    def handle_endtag(self, tag):
        """Dispatch an end tag to the matching handler."""
        self.write_pos()
        if (tag == 'textarea'):
            self.handle_end_textarea()
        elif (tag == 'select'):
            self.handle_end_select()
        elif (tag == 'form:error'):
            self.handle_end_error()
        elif (tag == 'form:iferror'):
            self.handle_end_iferror()

    def handle_startendtag(self, tag, attrs):
        """Treat self-closing tags like start tags."""
        return self.handle_starttag(tag, attrs, True)

    def handle_iferror(self, attrs):
        """Begin a <form:iferror name="..."> block; suppress its body when
        the named error is absent (or present, with a 'not ' prefix)."""
        name = self.get_attr(attrs, 'name')
        assert name, ('Name attribute in <iferror> required at %i:%i' % self.getpos())
        notted = name.startswith('not ')
        if notted:
            name = name.split(None, 1)[1]
        self.in_error = name
        ok = self.errors.get(name)
        if notted:
            ok = (not ok)
        if (not ok):
            self.skip_error = True
        self.skip_next = True

    def handle_end_iferror(self):
        """Close a <form:iferror> block and resume output."""
        self.in_error = None
        self.skip_error = False
        self.skip_next = True

    def handle_error(self, attrs):
        """Replace <form:error> with the (formatted) error message, if any."""
        name = self.get_attr(attrs, 'name')
        if (name is None):
            # Inherit the name from the enclosing <form:iferror>.
            name = self.in_error
        assert (name is not None), ('Name attribute in <form:error> required if not contained in <form:iferror> at %i:%i' % self.getpos())
        formatter = (self.get_attr(attrs, 'format') or 'default')
        error = self.errors.get(name, '')
        if error:
            error = self.error_formatters[formatter](error)
            self.write_text(error)
        self.skip_next = True
        self.used_errors.add(name)

    def handle_end_error(self):
        """Swallow the closing </form:error> tag."""
        self.skip_next = True

    def handle_input(self, attrs, startend):
        """Rewrite an <input> element according to its type and the defaults."""
        t = (self.get_attr(attrs, 'type') or 'text').lower()
        name = self.get_attr(attrs, 'name')
        if self.prefix_error:
            self.write_marker(name)
        value = self.defaults.get(name)
        # Apply configured extra attributes; '+name' appends to existing value.
        if (name in self.add_attributes):
            for (attr_name, attr_value) in self.add_attributes[name].items():
                if attr_name.startswith('+'):
                    attr_name = attr_name[1:]
                    self.set_attr(attrs, attr_name, (self.get_attr(attrs, attr_name, '') + attr_value))
                else:
                    self.set_attr(attrs, attr_name, attr_value)
        if (self.error_class and self.errors.get(self.get_attr(attrs, 'name'))):
            self.add_class(attrs, self.error_class)
        if (t in self.text_input_types):
            if ((value is None) and (not self.force_defaults)):
                value = self.get_attr(attrs, 'value', '')
            self.set_attr(attrs, 'value', value)
            self.write_tag('input', attrs, startend)
            self.skip_next = True
            self.add_key(name)
        elif (t == 'checkbox'):
            if self.force_defaults:
                selected = False
            else:
                selected = self.get_attr(attrs, 'checked')
            if (not self.get_attr(attrs, 'value')):
                # Value-less checkbox: checked-ness comes from the default.
                if self.checkbox_checked_if_present:
                    selected = (name in self.defaults)
                else:
                    selected = value
            elif self.selected_multiple(value, self.get_attr(attrs, 'value', '')):
                selected = True
            if selected:
                self.set_attr(attrs, 'checked', 'checked')
            else:
                self.del_attr(attrs, 'checked')
            self.write_tag('input', attrs, startend)
            self.skip_next = True
            self.add_key(name)
        elif (t == 'radio'):
            if self.str_compare(value, self.get_attr(attrs, 'value', '')):
                self.set_attr(attrs, 'checked', 'checked')
            elif (self.force_defaults or (name in self.defaults)):
                self.del_attr(attrs, 'checked')
            self.write_tag('input', attrs, startend)
            self.skip_next = True
            self.add_key(name)
        elif (t == 'password'):
            if self.skip_passwords:
                return
            if ((value is None) and (not self.force_defaults)):
                value = (value or self.get_attr(attrs, 'value', ''))
            self.set_attr(attrs, 'value', value)
            self.write_tag('input', attrs, startend)
            self.skip_next = True
            self.add_key(name)
        elif (t in ('file', 'image')):
            # File/image inputs cannot be prefilled; pass through unchanged.
            self.write_tag('input', attrs, startend)
            self.skip_next = True
            self.add_key(name)
        elif (t in ('submit', 'reset', 'button')):
            self.set_attr(attrs, 'value', (value or self.get_attr(attrs, 'value', '')))
            self.write_tag('input', attrs, startend)
            self.skip_next = True
            self.add_key(name)
        elif self.text_as_default:
            # Unknown input types optionally fall back to text handling.
            if (value is None):
                value = self.get_attr(attrs, 'value', '')
            self.set_attr(attrs, 'value', value)
            self.write_tag('input', attrs, startend)
            self.skip_next = True
            self.add_key(name)
        else:
            assert False, ("I don't know about this kind of <input>: %s at %i:%i" % ((t,) + self.getpos()))
        if (not self.prefix_error):
            self.write_marker(name)

    def handle_textarea(self, attrs):
        """Rewrite a <textarea>, filling its body with the escaped default."""
        name = self.get_attr(attrs, 'name')
        if self.prefix_error:
            self.write_marker(name)
        if (self.error_class and self.errors.get(name)):
            self.add_class(attrs, self.error_class)
        value = self.defaults.get(name, '')
        if (value or self.force_defaults):
            # Emit the full replacement now and skip the original body.
            self.write_tag('textarea', attrs)
            self.write_text(html_quote(value))
            self.write_text('</textarea>')
            self.skip_textarea = True
        self.in_textarea = True
        self.last_textarea_name = name
        self.add_key(name)

    def handle_end_textarea(self):
        """Close a <textarea>, emitting the end tag unless it was replaced."""
        if self.skip_textarea:
            self.skip_textarea = False
        else:
            self.write_text('</textarea>')
        self.in_textarea = False
        self.skip_next = True
        if (not self.prefix_error):
            self.write_marker(self.last_textarea_name)
        self.last_textarea_name = None

    def handle_select(self, attrs):
        """Open a <select>; remember its name for the contained <option>s."""
        name = self.get_attr(attrs, 'name', False)
        if (name and self.prefix_error):
            self.write_marker(name)
        if (self.error_class and self.errors.get(name)):
            self.add_class(attrs, self.error_class)
        self.in_select = self.get_attr(attrs, 'name', False)
        self.write_tag('select', attrs)
        self.skip_next = True
        self.add_key(self.in_select)

    def handle_end_select(self):
        """Close a <select> and reset the option context."""
        self.write_text('</select>')
        self.skip_next = True
        if ((not self.prefix_error) and self.in_select):
            self.write_marker(self.in_select)
        self.in_select = None

    def handle_option(self, attrs):
        """Mark an <option> selected/deselected per the enclosing select's default."""
        assert (self.in_select is not None), ('<option> outside of <select> at %i:%i' % self.getpos())
        if (self.in_select is not False):
            if (self.force_defaults or (self.in_select in self.defaults)):
                if self.selected_multiple(self.defaults.get(self.in_select), self.get_attr(attrs, 'value', '')):
                    self.set_attr(attrs, 'selected', 'selected')
                    self.add_key(self.in_select)
                else:
                    self.del_attr(attrs, 'selected')
        self.write_tag('option', attrs)
        self.skip_next = True

    def selected_multiple(self, obj, value):
        """True if *value* matches *obj*, which may be a scalar or container."""
        if (obj is None):
            return False
        if isinstance(obj, str):
            return (obj == value)
        if hasattr(obj, '__contains__'):
            if (value in obj):
                return True
        if hasattr(obj, '__iter__'):
            # Fall back to element-wise fuzzy string comparison.
            for inner in obj:
                if self.str_compare(inner, value):
                    return True
        return self.str_compare(obj, value)

    def write_marker(self, marker):
        """Append a 1-tuple marker so errors can later be inserted here."""
        self._content.append((marker,))

    def insert_at_marker(self, marker, text):
        """Insert *text* at the field's marker, or at the very start if the
        marker was never written."""
        for (i, item) in enumerate(self._content):
            if (item == (marker,)):
                self._content.insert(i, text)
                break
        else:
            self._content.insert(0, text)
def wenxin_generate_stream(model: ProxyModel, tokenizer, params, device, context_len=2048):
    """Stream a chat completion from Baidu's ERNIE (wenxin) API.

    Yields the accumulated response text chunk by chunk; on configuration
    errors yields a single error message and stops.

    :param model: proxy model holding backend name and API credentials
    :param tokenizer: unused, kept for the generate_stream interface
    :param params: request params; reads 'messages' and 'temperature'
    :param device: unused, kept for the generate_stream interface
    :param context_len: unused, kept for the generate_stream interface
    """
    MODEL_VERSION = {'ERNIE-Bot': 'completions', 'ERNIE-Bot-turbo': 'eb-instant'}
    model_params = model.get_params()
    model_name = model_params.proxyllm_backend
    model_version = MODEL_VERSION.get(model_name)
    if not model_version:
        yield f'Unsupport model version {model_name}'
        return  # FIX: previously fell through and called the API anyway
    proxy_api_key = model_params.proxy_api_key
    proxy_api_secret = model_params.proxy_api_secret
    access_token = _build_access_token(proxy_api_key, proxy_api_secret)
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    # FIX: the URL f-string was truncated in this copy; reconstructed from the
    # Baidu Qianfan chat endpoint — verify against the upstream source.
    proxy_server_url = f'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/{model_version}?access_token={access_token}'
    if not access_token:
        yield 'Failed to get access token. please set the correct api_key and secret key.'
        return  # FIX: do not issue the request without a token
    messages: List[ModelMessage] = params['messages']
    history, systems = __convert_2_wenxin_messages(messages)
    system = ''
    if systems and len(systems) > 0:
        system = systems[0]
    payload = {'messages': history, 'system': system, 'temperature': params.get('temperature'), 'stream': True}
    text = ''
    res = requests.post(proxy_server_url, headers=headers, json=payload, stream=True)
    print(f'Send request to {proxy_server_url} with real model {model_name}')
    for line in res.iter_lines():
        if line:
            if not line.startswith(b'data: '):
                # Non-SSE line: surface it as an error message.
                error_message = line.decode('utf-8')
                yield error_message
            else:
                json_data = line.split(b': ', 1)[1]
                decoded_line = json_data.decode('utf-8')
                if decoded_line.lower() != '[DONE]'.lower():
                    obj = json.loads(json_data)
                    if obj['result'] is not None:
                        content = obj['result']
                        text += content
                        yield text
def chromosome_scatter(cnarr, segments, variants, show_range, show_gene, antitarget_marker, do_trend, by_bin, window_width, y_min, y_max, title, segment_color):
    """Plot copy ratios (and optionally SNV allele frequencies) for one
    chromosome or sub-chromosomal window, returning the matplotlib Figure.

    Layout: with both CNV data and variants, a 5-row grid puts copy ratios
    on top (3/5) and allele frequencies below (2/5, shared x-axis); with
    only one kind of data, a single axis is used.
    """
    (sel_probes, sel_segs, sel_snvs, window_coords, genes, chrom) = select_range_genes(cnarr, segments, variants, show_range, show_gene, window_width)
    if (cnarr or segments):
        if variants:
            # Two stacked panels sharing the x-axis.
            axgrid = pyplot.GridSpec(5, 1, hspace=0.5)
            axis = pyplot.subplot(axgrid[:3])
            axis2 = pyplot.subplot(axgrid[3:], sharex=axis)
            snv_on_chromosome(axis2, sel_snvs, sel_segs, genes, do_trend, by_bin, segment_color)
        else:
            (_fig, axis) = pyplot.subplots()
        if by_bin:
            axis.set_xlabel('Position (bin)')
        else:
            axis.set_xlabel('Position (Mb)')
        axis = cnv_on_chromosome(axis, sel_probes, sel_segs, genes, antitarget_marker=antitarget_marker, do_trend=do_trend, x_limits=window_coords, y_min=y_min, y_max=y_max, segment_color=segment_color)
    elif variants:
        # Variants only: single allele-frequency panel.
        (_fig, axis) = pyplot.subplots()
        axis = snv_on_chromosome(axis, sel_snvs, sel_segs, genes, do_trend, by_bin, segment_color)
    if (title is None):
        # Default title: "<sample_id> <chromosome>" from whichever input exists.
        title = ('%s %s' % ((cnarr or segments or variants).sample_id, chrom))
    axis.set_title(title)
    return axis.get_figure()
def upgrade():
    """Alembic migration: add `ownername` to the copr_dir table.

    Adds a NOT NULL text column, an index on it, a composite unique
    constraint on (ownername, name), and drops the old (copr_id, name)
    unique constraint.

    NOTE(review): adding a NOT NULL column with no server default fails on a
    non-empty table — presumably copr_dir is empty (or backfilled elsewhere)
    at this migration point; confirm.
    """
    op.add_column('copr_dir', sa.Column('ownername', sa.Text(), nullable=False))
    op.create_index(op.f('ix_copr_dir_ownername'), 'copr_dir', ['ownername'], unique=False)
    op.create_unique_constraint('ownername_copr_dir_uniq', 'copr_dir', ['ownername', 'name'])
    op.drop_constraint(u'copr_dir_copr_id_name_uniq', 'copr_dir', type_='unique')
class OptionPlotoptionsNetworkgraphSonificationDefaultspeechoptionsMappingTime(Options):
    """Config wrapper for the sonification time-mapping options.

    FIX: each getter/setter pair was defined twice with the same name (the
    setter silently shadowing the getter) — the stripped ``@property`` /
    ``@<name>.setter`` decorators are restored so both work.
    """

    @property
    def mapFunction(self):
        """Mapping function for the time option (delegates to _config_get)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Data property to map the time option to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum value for the mapped time."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum value for the mapped time."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range the mapping should stay within."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_footballmatch_module_no_matchdata(lfs_match, monkeypatch):
    """With an empty matchData payload, str(match) reports no game today."""
    def fake_fixtures(*_args, **_kwargs):
        # Minimal API payload with no fixtures listed.
        return {'fixtureListMeta': {'scorersButtonShouldBeEnabled': False}, 'matchData': []}

    monkeypatch.setattr('qtile_extras.resources.footballscores.FootballMatch._get_scores_fixtures', fake_fixtures)
    match = lfs_match('Chelsea')
    assert str(match) == 'Chelsea are not playing today.'
def test_transformer_with_default_params():
    """SelectByInformationValue with defaults drops var_C/var_E and records
    the information values of every variable."""
    df = pd.DataFrame({
        'var_A': ['A'] * 6 + ['B'] * 10 + ['C'] * 4,
        'var_B': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
        'var_C': ['X'] * 7 + ['Y'] * 5 + ['Z'] * 8,
        'var_D': ['L'] * 3 + ['M'] * 9 + ['N'] * 8,
        'var_E': ['R'] * 7 + ['S'] * 4 + ['T'] * 9,
        'target': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0],
    })
    X = df.drop('target', axis=1).copy()
    y = df['target'].copy()

    sel = SelectByInformationValue()
    sel.fit(df.drop('target', axis=1), df['target'])
    X_tr = sel.fit_transform(X, y)

    expected_iv = {'var_A': 0., 'var_B': 0., 'var_C': 0., 'var_D': 0., 'var_E': 0.}
    dropped = ['var_C', 'var_E']
    expected_df = X.drop(dropped, axis=1)

    for column, iv in expected_iv.items():
        assert math.isclose(iv, sel.information_values_[column])
    assert sel.features_to_drop_ == dropped
    assert X_tr.equals(expected_df)
def test_decode_endpoints():
    """prom_endpoints() rebuilds Endpoint objects from Prometheus label dicts,
    merging metadata and role/confidence information keyed by hash id."""
    p = Prometheus()
    # Metadata labels as scraped from poseidon_endpoint_metadata.
    hashes = {'6b33db53faf33c77d694ecab2e3fefadc7dacc70': {'__name__': 'poseidon_endpoint_metadata', 'acls': '[]', 'controller_type': 'faucet', 'ether_vendor': 'Micro-Star', 'hash_id': '6b33db53faf33c77d694ecab2e3fefadc7dacc70', 'ignore': 'False', 'instance': 'poseidon:9304', 'ipv4_address': '192.168.3.131', 'ipv4_os': 'Windows', 'ipv4_rdns': 'NO DATA', 'ipv4_subnet': '192.168.3.0/24', 'ipv6_subnet': 'NO DATA', 'job': 'poseidon', 'mac': '40:61:86:9a:f1:f5', 'name': 'None', 'next_state': 'None', 'port': '1', 'prev_state': 'queued', 'segment': 'switch1', 'state': 'operating', 'tenant': 'VLAN100', 'top_role': 'Administrator workstation'}}
    # Role predictions with confidences and pcap labels for the same endpoint.
    role_hashes = {'6b33db53faf33c77d694ecab2e3fefadc7dacc70': {'mac': '40:61:86:9a:f1:f5', 'pcap_labels': 'foo', 'top_confidence': 1.0, 'state': 'operating', 'top_role': 'Administrator workstation', 'second_role': 'GPU laptop', 'second_confidence': 0., 'third_role': 'Developer workstation', 'third_confidence': 0.}}
    endpoints = p.prom_endpoints(hashes, role_hashes)
    endpoint = endpoints['6b33db53faf33c77d694ecab2e3fefadc7dacc70']
    assert (endpoint.state == 'operating')
    assert (endpoint.get_ipv4_os() == 'Windows')
    # Roles/confidences come back ordered top -> second -> third.
    (roles, confidences, pcap_labels) = endpoint.get_roles_confidences_pcap_labels()
    assert (roles == ('Administrator workstation', 'GPU laptop', 'Developer workstation'))
    assert (confidences == (1.0, 0., 0.))
    assert (pcap_labels == 'foo')
def prophet_copy(m, cutoff=None):
    """Copy a fitted Prophet model into a fresh, unfitted instance.

    :param m: fitted Prophet model (must have ``history`` set)
    :param cutoff: if given and changepoints were user-specified, keep only
        changepoints strictly before the last history date <= cutoff
    :return: new Prophet instance with the same configuration
    :raises Exception: if ``m`` has not been fitted
    """
    if (m.history is None):
        raise Exception('This is for copying a fitted Prophet object.')
    if m.specified_changepoints:
        changepoints = m.changepoints
        if (cutoff is not None):
            # Filter changepoints to those before the cutoff's last history date.
            last_history_date = max(m.history['ds'][(m.history['ds'] <= cutoff)])
            changepoints = changepoints[(changepoints < last_history_date)]
    else:
        # Auto-placed changepoints will be re-derived on refit.
        changepoints = None
    # Seasonalities are carried over via attributes below, so the built-in
    # seasonality flags are disabled here.
    m2 = m.__class__(growth=m.growth, n_changepoints=m.n_changepoints, changepoint_range=m.changepoint_range, changepoints=changepoints, yearly_seasonality=False, weekly_seasonality=False, daily_seasonality=False, holidays=m.holidays, holidays_mode=m.holidays_mode, seasonality_mode=m.seasonality_mode, seasonality_prior_scale=m.seasonality_prior_scale, changepoint_prior_scale=m.changepoint_prior_scale, holidays_prior_scale=m.holidays_prior_scale, mcmc_samples=m.mcmc_samples, interval_width=m.interval_width, uncertainty_samples=m.uncertainty_samples, stan_backend=(m.stan_backend.get_type() if (m.stan_backend is not None) else None))
    m2.extra_regressors = deepcopy(m.extra_regressors)
    m2.seasonalities = deepcopy(m.seasonalities)
    m2.country_holidays = deepcopy(m.country_holidays)
    return m2
def test_service_annotations():
    """Service annotations from helm values appear on the rendered Service.

    NOTE(review): the indentation inside these YAML literals looks collapsed
    (single spaces); reproduced byte-for-byte from the original.
    """
    cases = [
        ('\nservice:\n annotations:\n cloud.google.com/load-balancer-type: "Internal"\n ',
         'cloud.google.com/load-balancer-type', 'Internal'),
        ('\nservice:\n annotations:\n service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0\n ',
         'service.beta.kubernetes.io/aws-load-balancer-internal', '0.0.0.0/0'),
    ]
    for config, annotation_key, expected in cases:
        rendered = helm_template(config)
        value = rendered['service'][name]['metadata']['annotations'][annotation_key]
        assert value == expected
def failed_role_request(test, role, gen, code, con=None):
    """Send an OFPT_ROLE_REQUEST that is expected to fail and verify the
    returned role_request_failed error carries the expected code.

    :param test: test case providing assertions and the default controller
    :param role: controller role to request
    :param gen: generation id for the request
    :param code: expected error code on the failure message
    :param con: connection to use; defaults to ``test.controller``
    """
    if con is None:  # FIX: identity check for None instead of `== None`
        con = test.controller
    request = ofp.message.role_request(role=role, generation_id=gen)
    response, _ = con.transact(request)
    test.assertIsInstance(response, ofp.message.role_request_failed_error_msg)
    test.assertEqual(response.code, code)
def fxfy_loops(ft: A, fn: A, theta: A):
    """Rotate tangential/normal force components into x/y components.

    Element-wise 2-D rotation by ``theta`` — deliberately written with an
    explicit Python loop (presumably as a reference/benchmark against a
    vectorized version).

    :param ft: tangential components
    :param fn: normal components
    :param theta: rotation angles in radians
    :return: tuple ``(fx, fy)`` with the same dtype/shape as the inputs
    """
    fx = np.empty_like(ft)
    fy = np.empty_like(fn)
    for i in range(theta.size):
        s = np.sin(theta[i])
        c = np.cos(theta[i])
        fx[i] = c * ft[i] - s * fn[i]
        fy[i] = s * ft[i] + c * fn[i]
    return (fx, fy)
# FIX: the marker line was mangled to ".django_db" (a syntax error);
# restored the standard pytest-django marker.
@pytest.mark.django_db
def test_financial_obligations(client, financial_obligations_models):
    """federal_obligations endpoint returns both fixture rows with the
    expected account fields, ordered by obligated amount (descending)."""
    resp = client.get('/api/v2/federal_obligations/?funding_agency_id=654&fiscal_year=2016')
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data['results']) == 2

    # First (larger) obligation.
    res_awesome = resp.data['results'][0]
    assert res_awesome['id'] == '1234'
    assert res_awesome['account_number'] == '314-1592'
    assert res_awesome['account_title'] == 'Suits and Ties'
    assert res_awesome['obligated_amount'] == '400.00'

    # Second (smaller) obligation.
    res_lame = resp.data['results'][1]
    assert res_lame['id'] == '6969'
    assert res_lame['account_number'] == '867-5309'
    assert res_lame['account_title'] == 'Turtlenecks and Chains'
    assert res_lame['obligated_amount'] == '100.00'
class OptionPlotoptionsLineSonificationContexttracksMappingTremolo(Options):
    """Config wrapper for the tremolo mapping of a sonification context track.

    FIX: the sub-option accessors lost their ``@property`` decorators in this
    copy; restored so ``.depth`` / ``.speed`` return the sub-option objects
    rather than bound methods.
    """

    @property
    def depth(self) -> 'OptionPlotoptionsLineSonificationContexttracksMappingTremoloDepth':
        """Tremolo depth sub-options."""
        return self._config_sub_data('depth', OptionPlotoptionsLineSonificationContexttracksMappingTremoloDepth)

    @property
    def speed(self) -> 'OptionPlotoptionsLineSonificationContexttracksMappingTremoloSpeed':
        """Tremolo speed sub-options."""
        return self._config_sub_data('speed', OptionPlotoptionsLineSonificationContexttracksMappingTremoloSpeed)
class ICSCalendarData():
    """Home Assistant calendar data source backed by a remote ICS feed."""

    def __init__(self, device_data):
        """Wire up the parser, filters and download helper from device config."""
        self.name = device_data[CONF_NAME]
        self._days = device_data[CONF_DAYS]
        self._offset_hours = device_data[CONF_OFFSET_HOURS]
        self.include_all_day = device_data[CONF_INCLUDE_ALL_DAY]
        self._summary_prefix: str = device_data[CONF_PREFIX]
        self.parser = ICalendarParser.get_instance(device_data[CONF_PARSER])
        self.parser.set_filter(Filter(device_data[CONF_EXCLUDE], device_data[CONF_INCLUDE]))
        self.offset = None
        self.event = None
        self._calendar_data = CalendarData(_LOGGER, self.name, device_data[CONF_URL], timedelta(minutes=device_data[CONF_DOWNLOAD_INTERVAL]))
        self._calendar_data.set_headers(device_data[CONF_USERNAME], device_data[CONF_PASSWORD], device_data[CONF_USER_AGENT], device_data[CONF_ACCEPT_HEADER])

    async def async_get_events(self, hass: HomeAssistant, start_date: datetime, end_date: datetime) -> list[CalendarEvent]:
        """Return calendar events within the given window.

        Downloads the calendar off the event loop; parse failures are logged
        and yield an empty list (best-effort by design).
        """
        event_list = []
        if (await hass.async_add_executor_job(self._calendar_data.download_calendar)):
            _LOGGER.debug('%s: Setting calendar content', self.name)
            self.parser.set_content(self._calendar_data.get())
        try:
            event_list = self.parser.get_event_list(start=start_date, end=end_date, include_all_day=self.include_all_day, offset_hours=self._offset_hours)
        except:
            _LOGGER.error('async_get_events: %s: Failed to parse ICS!', self.name, exc_info=True)
            event_list = []
        for event in event_list:
            event.summary = (self._summary_prefix + event.summary)
        return event_list

    # FIX: the decorator line was mangled to a bare "(MIN_TIME_BETWEEN_UPDATES)";
    # restored the Home Assistant throttle decorator.
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Refresh the current/next event; returns True if one was found."""
        _LOGGER.debug('%s: Update was called', self.name)
        if self._calendar_data.download_calendar():
            _LOGGER.debug('%s: Setting calendar content', self.name)
            self.parser.set_content(self._calendar_data.get())
        try:
            self.event = self.parser.get_current_event(include_all_day=self.include_all_day, now=hanow(), days=self._days, offset_hours=self._offset_hours)
        except:
            _LOGGER.error('update: %s: Failed to parse ICS!', self.name, exc_info=True)
        if (self.event is not None):
            _LOGGER.debug('%s: got event: %s; start: %s; end: %s; all_day: %s', self.name, self.event.summary, self.event.start, self.event.end, self.event.all_day)
            # Extract any "!!+HH:MM"-style offset marker from the summary.
            (summary, offset) = extract_offset(self.event.summary, OFFSET)
            self.event.summary = (self._summary_prefix + summary)
            self.offset = offset
            return True
        _LOGGER.debug('%s: No event found!', self.name)
        return False
def test_application_close(test_client_factory):
    """A server-initiated close surfaces as WebSocketDisconnect with the
    same close code on the client side."""
    async def app(scope: Scope, receive: Receive, send: Send) -> None:
        ws = WebSocket(scope, receive=receive, send=send)
        await ws.accept()
        await ws.close(status.WS_1001_GOING_AWAY)

    client = test_client_factory(app)
    with client.websocket_connect('/') as websocket:
        with pytest.raises(WebSocketDisconnect) as exc:
            websocket.receive_text()
        assert exc.value.code == status.WS_1001_GOING_AWAY
def test_oniom_md():
    """Run a short MD trajectory on an ONIOM (psi4 high / xtb low) setup and
    dump it as a .trj file."""
    calc_dict = {
        'high': {'type': 'pypsi4', 'method': 'scf', 'basis': 'sto-3g'},
        'low': {'type': 'pyxtb'},
    }
    high_inds = (4, 5, 6)
    from pysisyphus.calculators.ONIOM import ONIOM
    oniom = ONIOM(calc_dict, high_inds)

    geom = geom_loader('lib:acetaldehyd_oniom.xyz')
    geom.set_calculator(oniom)

    # Small random initial velocities; 40 fs with 0.5 fs steps.
    v0 = 0.005 * np.random.rand(*geom.coords.shape)
    md_result = md(geom, **{'v0': v0, 't': 40, 'dt': 0.5})

    from pysisyphus.xyzloader import make_trj_str
    trajectory = md_result.coords.reshape(-1, len(geom.atoms), 3) * BOHR2ANG
    with open('md.trj', 'w') as handle:
        handle.write(make_trj_str(geom.atoms, trajectory))
# FIX: the marker line was mangled to ".parametrize(...)" (a syntax error);
# restored the standard pytest parametrize marker.
@pytest.mark.parametrize('type_, test, expected', [('str', 'value', 'value'), ('int', '10', 10), ('float', '0.5', 0.5), ('bool', 'yes', True), ('bool', 'y', True), ('bool', 'true', True), ('bool', 'True', True), ('bool', '1', True), ('bool', 'anything_else', False), ('SomeType', 'value', 'value')])
def test_from_str_to(type_, test, expected):
    """from_str_to(type name) returns a converter that parses the string
    accordingly; unknown type names pass the value through unchanged."""
    assert from_str_to(type_)(test) == expected
class LivenessAnalysis():
    """Per-basic-block liveness analysis over an SSA control flow graph.

    Computes live-in/live-out variable sets using the path-exploration
    algorithm (backwards from each use to the defining block), with Phi
    uses attributed to the corresponding predecessor block.
    """

    def __init__(self, cfg: ControlFlowGraph):
        """Collect use/def sets and immediately run the analysis on *cfg*."""
        self._cfg: ControlFlowGraph = cfg
        # Non-phi uses/defs per block.
        self._uses_block: DefaultDict[(BasicBlock, InsertionOrderedSet[Variable])] = defaultdict(InsertionOrderedSet)
        self._defs_block: DefaultDict[(BasicBlock, InsertionOrderedSet[Variable])] = defaultdict(InsertionOrderedSet)
        # Phi uses are attributed to the predecessor the value flows from;
        # phi defs to the block containing the phi.
        self._uses_phi_block: DefaultDict[(BasicBlock, InsertionOrderedSet[Variable])] = defaultdict(InsertionOrderedSet)
        self._defs_phi_block: DefaultDict[(BasicBlock, InsertionOrderedSet[Variable])] = defaultdict(InsertionOrderedSet)
        # Analysis results.
        self._live_in_block: DefaultDict[(BasicBlock, InsertionOrderedSet[Variable])] = defaultdict(InsertionOrderedSet)
        self._live_out_block: DefaultDict[(BasicBlock, InsertionOrderedSet[Variable])] = defaultdict(InsertionOrderedSet)
        self._create_live_sets()

    def live_in_of(self, basicblock: BasicBlock) -> InsertionOrderedSet[Variable]:
        """Variables live on entry to *basicblock*."""
        return self._live_in_block[basicblock]

    def live_out_of(self, basicblock: BasicBlock) -> InsertionOrderedSet[Variable]:
        """Variables live on exit from *basicblock*."""
        return self._live_out_block[basicblock]

    def defs_phi_of(self, basicblock: BasicBlock) -> InsertionOrderedSet[Variable]:
        """Variables defined by Phi functions in *basicblock*."""
        return self._defs_phi_block[basicblock]

    def _init_usages_definitions_of_blocks(self) -> None:
        """Populate the four use/def sets by scanning every instruction."""
        for basicblock in self._cfg:
            for instruction in basicblock.instructions:
                if isinstance(instruction, Phi):
                    self._defs_phi_block[basicblock].update(instruction.definitions)
                    # A phi's operand is used at the end of its source block.
                    for (pred_block, value) in instruction.origin_block.items():
                        if isinstance(value, Variable):
                            self._uses_phi_block[pred_block].add(value)
                else:
                    self._defs_block[basicblock].update(instruction.definitions)
                    self._uses_block[basicblock].update(instruction.requirements)

    def _explore_all_paths(self, basicblock: BasicBlock, variable: Variable) -> None:
        """Walk backwards from a use of *variable*, marking it live until a
        defining block is reached (recursive up-and-mark)."""
        # Stop at the definition, or if this block was already processed.
        if ((variable in self._defs_block[basicblock]) or (variable in self._live_in_block[basicblock])):
            return
        self._live_in_block[basicblock].add(variable)
        # A phi definition also terminates the walk (after marking live-in).
        if (variable in self._defs_phi_block[basicblock]):
            return
        for predecessor_block in self._cfg.get_predecessors(basicblock):
            self._live_out_block[predecessor_block].add(variable)
            self._explore_all_paths(predecessor_block, variable)

    def _create_live_sets(self):
        """Drive the path exploration from every (phi and non-phi) use."""
        self._init_usages_definitions_of_blocks()
        for basicblock in self._cfg.nodes:
            # Phi uses are live-out of the predecessor block they come from.
            for variable in self._uses_phi_block[basicblock]:
                self._live_out_block[basicblock].add(variable)
                self._explore_all_paths(basicblock, variable)
            for variable in self._uses_block[basicblock]:
                self._explore_all_paths(basicblock, variable)
        # NOTE(review): a None key appears when a phi origin block is None —
        # presumably an entry/undefined predecessor; carried over verbatim.
        if (None in self._uses_phi_block.keys()):
            self._live_out_block[None] = self._uses_phi_block[None]
def test_medium_dispersion():
    """Smoke-test the dispersive medium models: construction, validation of
    zero coefficients, passivity (Im(eps) >= 0), and LO-TO conversion."""
    m_PR = td.PoleResidue(eps_inf=1.0, poles=[((-1 + 2j), (1 + 3j)), ((-2 + 4j), (1 + 5j))])
    m_SM = td.Sellmeier(coeffs=[(2, 3), (2, 4)])
    m_LZ = td.Lorentz(eps_inf=1.0, coeffs=[(1, 3, 2), (2, 4, 1)])
    m_LZ2 = td.Lorentz(eps_inf=1.0, coeffs=[(1, 2, 3), (2, 1, 4)])
    m_DR = td.Drude(eps_inf=1.0, coeffs=[(1, 3), (2, 4)])
    m_DB = td.Debye(eps_inf=1.0, coeffs=[(1, 3), (2, 4)])
    # Zero first coefficients must be rejected by the validators.
    for make_invalid in (
        lambda: td.Sellmeier(coeffs=[(2, 0), (2, 4)]),
        lambda: td.Drude(eps_inf=1.0, coeffs=[(1, 0), (2, 4)]),
        lambda: td.Debye(eps_inf=1.0, coeffs=[(1, 0), (2, 4)]),
    ):
        with pytest.raises(pydantic.ValidationError):
            _ = make_invalid()
    freqs = np.linspace(0.01, 1, 1001)
    # Evaluating the permittivity model must not raise for any medium.
    for medium in (m_PR, m_SM, m_LZ, m_LZ2, m_DR, m_DB):
        _ = medium.eps_model(freqs)
    # Passive models must have a non-negative imaginary part.
    for medium in (m_SM, m_LZ, m_LZ2, m_DR, m_DB):
        eps_c = medium.eps_model(freqs)
        assert np.all(eps_c.imag >= 0)
    # Also accepts a plain integer-valued array of frequencies.
    m_SM.eps_model(np.array([1, 2]))
    # LO-TO constructor must agree with the direct LO-TO model evaluation.
    poles = [(1, 0.1, 2, 5), (3, 0.4, 1, 0.4)]
    m_LO_TO = td.PoleResidue.from_lo_to(poles=poles, eps_inf=2)
    assert np.allclose(m_LO_TO.eps_model(freqs), td.PoleResidue.lo_to_eps_model(poles=poles, eps_inf=2, frequency=freqs))
class LedgerApiDialogues(Model, BaseLedgerApiDialogues):
    """Skill model that keeps track of ledger-API dialogues for this agent."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the dialogues collection.

        :param kwargs: keyword arguments forwarded to the skill Model base.
        """
        Model.__init__(self, **kwargs)

        def _always_agent(message: Message, receiver_address: Address) -> BaseDialogue.Role:
            # Whoever sent the first message, this skill plays the AGENT role.
            return BaseLedgerApiDialogue.Role.AGENT

        BaseLedgerApiDialogues.__init__(
            self,
            self_address=str(self.skill_id),
            role_from_first_message=_always_agent,
        )
def get_table_features(dp, waiters, to_user=True):
    """Query OFPTableFeaturesStats from the datapath and decode it into JSON-friendly dicts.

    :param dp: datapath to query.
    :param waiters: reply bookkeeping shared with the ofctl utility layer.
    :param to_user: when True, translate numeric ids into user-facing values.
    :return: result of wrap_dpid_dict over the decoded table list.
    """
    stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
    msgs = []
    ofproto = dp.ofproto
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    # Group the table-feature property type constants by how their payload
    # must be decoded below.
    p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS, ofproto.OFPTFPT_INSTRUCTIONS_MISS]
    p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES, ofproto.OFPTFPT_NEXT_TABLES_MISS, ofproto.OFPTFPT_TABLE_SYNC_FROM]
    p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS, ofproto.OFPTFPT_WRITE_ACTIONS_MISS, ofproto.OFPTFPT_APPLY_ACTIONS, ofproto.OFPTFPT_APPLY_ACTIONS_MISS]
    p_type_packet = ofproto.OFPTFPT_PACKET_TYPES
    p_type_oxms = [ofproto.OFPTFPT_MATCH, ofproto.OFPTFPT_WILDCARDS, ofproto.OFPTFPT_WRITE_SETFIELD, ofproto.OFPTFPT_WRITE_SETFIELD_MISS, ofproto.OFPTFPT_APPLY_SETFIELD, ofproto.OFPTFPT_APPLY_SETFIELD_MISS, ofproto.OFPTFPT_WRITE_COPYFIELD, ofproto.OFPTFPT_WRITE_COPYFIELD_MISS, ofproto.OFPTFPT_APPLY_COPYFIELD, ofproto.OFPTFPT_APPLY_COPYFIELD_MISS]
    p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER, ofproto.OFPTFPT_EXPERIMENTER_MISS]
    tables = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = {}
                t = UTIL.ofp_table_feature_prop_type_to_user(prop.type)
                # If the translator returned the raw value unchanged, it is
                # an unrecognized property type.
                p['type'] = (t if (t != prop.type) else 'UNKNOWN')
                if (prop.type in p_type_instructions):
                    instruction_ids = []
                    for i in prop.instruction_ids:
                        inst = {'len': i.len, 'type': i.type}
                        instruction_ids.append(inst)
                    p['instruction_ids'] = instruction_ids
                elif (prop.type in p_type_next_tables):
                    table_ids = []
                    for i in prop.table_ids:
                        table_ids.append(i)
                    p['table_ids'] = table_ids
                elif (prop.type in p_type_actions):
                    action_ids = []
                    for i in prop.action_ids:
                        act = i.to_jsondict()[i.__class__.__name__]
                        action_ids.append(act)
                    p['action_ids'] = action_ids
                elif (prop.type in p_type_oxms):
                    oxm_ids = []
                    for i in prop.oxm_ids:
                        oxm = i.to_jsondict()[i.__class__.__name__]
                        oxm_ids.append(oxm)
                    p['oxm_ids'] = oxm_ids
                elif (prop.type == p_type_packet):
                    oxm_values = []
                    for val in prop.oxm_values:
                        i = {val[0]: val[1]}
                        oxm_values.append(i)
                    p['oxm_values'] = oxm_values
                elif (prop.type in p_type_experimenter):
                    # Experimenter properties carry opaque data; only the
                    # type tag is reported.
                    pass
                properties.append(p)
            s['name'] = stat.name.decode('utf-8')
            s['properties'] = properties
            if to_user:
                s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(s)
    return wrap_dpid_dict(dp, tables, to_user)
class IntegrationGrid():
    """An equally-spaced integration grid over an n-dimensional hyperrectangle.

    All attributes are populated by ``__init__``.
    """

    points = None  # flattened mesh of integration points, shape (N_per_dim**dim, dim)
    h = None  # mesh width per dimension
    _N = None  # number of grid points per dimension
    _dim = None  # dimensionality of the grid
    _runtime = None  # grid creation time in seconds, for profiling

    def __init__(self, N, integration_domain, grid_func=grid_func, disable_integration_domain_check=False):
        """Create a grid of approximately N points over integration_domain.

        Args:
            N: requested total number of points; rounded down to an integer
                per-dimension count.
            integration_domain: [dim, 2] list or backend tensor of per-dimension
                integration bounds.
            grid_func: callable producing the 1D grid points for one dimension.
            disable_integration_domain_check: skip domain validation if True.
        """
        start = perf_counter()
        self._check_inputs(N, integration_domain, disable_integration_domain_check)
        backend = infer_backend(integration_domain)
        if (backend == 'builtins'):
            # Plain lists/tuples default to the torch backend.
            backend = 'torch'
            integration_domain = _setup_integration_domain(len(integration_domain), integration_domain, backend=backend)
        elif ('int' in str(integration_domain.dtype)):
            # Integer domains would truncate the grid points; promote to float64.
            dtype = to_backend_dtype('float64', like=backend)
            integration_domain = astype(integration_domain, dtype)
        self._dim = integration_domain.shape[0]
        # Points per dimension; the epsilon guards against floating-point
        # round-off (e.g. 1000 ** (1/3) evaluating to 9.999...).
        self._N = int(((N ** (1.0 / self._dim)) + 1e-08))
        logger.opt(lazy=True).debug('Creating {dim}-dimensional integration grid with {N} points over {dom}', dim=(lambda : str(self._dim)), N=(lambda : str(N)), dom=(lambda : str(integration_domain)))
        if hasattr(integration_domain, 'requires_grad'):
            requires_grad = integration_domain.requires_grad
        else:
            requires_grad = False
        grid_1d = []
        for dim in range(self._dim):
            grid_1d.append(grid_func(integration_domain[dim], self._N, requires_grad=requires_grad, backend=backend))
        self.h = anp.stack([(grid_1d[dim][1] - grid_1d[dim][0]) for dim in range(self._dim)], like=integration_domain)
        logger.opt(lazy=True).debug('Grid mesh width is {h}', h=(lambda : str(self.h)))
        points = anp.meshgrid(*grid_1d)
        self.points = anp.stack([mg.ravel() for mg in points], axis=1, like=integration_domain)
        logger.info('Integration grid created.')
        self._runtime = (perf_counter() - start)

    def _check_inputs(self, N, integration_domain, disable_integration_domain_check):
        """Validate N and the integration domain.

        Raises:
            ValueError: if N is too small overall or per dimension.
        """
        logger.debug('Checking inputs to IntegrationGrid.')
        if disable_integration_domain_check:
            dim = len(integration_domain)
        else:
            dim = _check_integration_domain(integration_domain)
        if (N < 2):
            raise ValueError('N has to be > 1.')
        if ((N ** (1.0 / dim)) < 2):
            # FIX: the original passed multiple positional args to ValueError,
            # which rendered as a tuple instead of a readable message.
            raise ValueError(f'Cannot create a {dim}-dimensional grid with {N} points. Too few points per dimension.')
(urls.POLICY_POST_WEBHOOK_DETAIL, status_code=HTTP_200_OK, dependencies=[Security(verify_oauth_client, scopes=[scopes.WEBHOOK_DELETE])], response_model=schemas.PolicyWebhookDeleteResponse)
# NOTE(review): the line above looks like a route decorator whose
# `@router.<http_method>` prefix was lost in extraction — confirm upstream.
def delete_post_execution_webhook(*, db: Session=Depends(deps.get_db), policy_key: FidesKey, post_webhook_key: FidesKey) -> schemas.PolicyWebhookDeleteResponse:
    """Delete a post-execution webhook from a policy; delegates to the shared delete_webhook helper."""
    return delete_webhook(db=db, policy_key=policy_key, webhook_key=post_webhook_key, webhook_cls=PolicyPostWebhook)
def runon30seconds():
    """Run every enabled controller's thirty-second timer handler concurrently.

    Spawns one daemon thread per eligible controller, waits for all of them
    to finish, and returns 0.
    """
    threads = []
    for controller in Settings.Controllers:
        # Skip empty slots, disabled controllers, and controllers that did
        # not register a 30-second timer.
        if controller and controller.enabled and controller.timer30s:
            t = threading.Thread(target=controller.timer_thirty_second)
            t.daemon = True
            threads.append(t)
            t.start()
    for t in threads:
        t.join()
    return 0
def get_current_ethdo_version():
    """Return the installed ethdo version string, or False if the binary is missing.

    stdout and stderr are combined because ethdo may report its version on
    either stream depending on the release.
    """
    try:
        process_result = subprocess.run([(ETHDO_INSTALLED_PATH + 'ethdo'), 'version'], capture_output=True, text=True)
    except FileNotFoundError:
        # Binary not installed (or not at the expected path).
        return False
    # FIX: removed the dead `version = UNKNOWN_VERSION` initialization — it was
    # always overwritten on success and unused on failure.
    process_output = ((process_result.stdout + '\n') + process_result.stderr)
    return process_output.strip()
class TestMassEditWalk(unittest.TestCase):
    """End-to-end tests for massedit's recursive directory processing."""

    def setUp(self):
        """Create a workspace subdirectory holding three numbered .txt files."""
        self.workspace = Workspace()
        self.subdirectory = self.workspace.get_directory()
        self.file_names = []
        for ii in range(3):
            file_name = self.workspace.get_file(parent_dir=self.subdirectory, extension='.txt')
            with io.open(file_name, 'w+') as fh:
                fh.write((unicode('some text ') + unicode(ii)))
            self.file_names.append(file_name)

    def tearDown(self):
        """Remove the temporary workspace."""
        self.workspace.cleanup()

    def test_feature(self):
        # Placeholder so the fixture setup/teardown is exercised on its own.
        pass

    def test_process_subdirectory_dry_run(self):
        """Dry run reports diffs for all files but leaves them unmodified on disk."""
        output = io.StringIO()
        processed_files = massedit.edit_files(['*.txt'], expressions=["re.sub('text', 'blah blah', line)"], start_dirs=self.workspace.top_dir, output=output)
        self.assertEqual(sorted(processed_files), sorted(self.file_names))
        index = {}
        for (ii, file_name) in enumerate(self.file_names):
            with io.open(file_name) as fh:
                new_lines = fh.readlines()
            # Files must be untouched by a dry run.
            self.assertEqual(new_lines, [('some text ' + unicode(ii))])
            index[file_name] = ii
        actual = output.getvalue()
        expected = ''.join([textwrap.dedent(' --- {}\n +++ <new>\n -1 +1 \n -some text {}+some blah blah {}').format(file_name, index[file_name], index[file_name]) for file_name in processed_files])
        self.assertEqual(actual, expected)

    def test_process_subdirectory_dry_run_with_one_change(self):
        """Dry run reports a diff only for the single file that the expression matches."""
        output = io.StringIO()
        processed_files = massedit.edit_files(['*.txt'], expressions=["re.sub('text 1', 'blah blah 1', line)"], start_dirs=self.workspace.top_dir, output=output)
        self.assertEqual(processed_files, self.file_names[1:2])
        index = {}
        for (ii, file_name) in enumerate(self.file_names):
            with io.open(file_name) as fh:
                new_lines = fh.readlines()
            self.assertEqual(new_lines, [('some text ' + unicode(ii))])
            index[file_name] = ii
        actual = output.getvalue()
        expected = ''.join([textwrap.dedent(' --- {}\n +++ <new>\n -1 +1 \n -some text {}+some blah blah {}').format(file_name, index[file_name], index[file_name]) for file_name in processed_files])
        self.assertEqual(actual, expected)

    def test_process_subdirectory(self):
        """With -w (write), all matching files are rewritten in place."""
        arguments = ['-r', '-s', self.workspace.top_dir, '-w', '-e', "re.sub('text', 'blah blah', line)", '*.txt']
        processed_files = massedit.command_line(arguments)
        self.assertEqual(sorted(processed_files), sorted(self.file_names))
        for (ii, file_name) in enumerate(self.file_names):
            with io.open(file_name) as fh:
                new_lines = fh.readlines()
            self.assertEqual(new_lines, [('some blah blah ' + unicode(ii))])

    def test_maxdepth_one(self):
        """With max depth 0, files in the subdirectory are out of reach and nothing changes."""
        arguments = ['-r', '-s', self.workspace.top_dir, '-w', '-e', "re.sub('text', 'blah blah', line)", '-m', '0', '*.txt']
        processed_files = massedit.command_line(arguments)
        self.assertEqual(processed_files, [])
        for (ii, file_name) in enumerate(self.file_names):
            with io.open(file_name) as fh:
                new_lines = fh.readlines()
            self.assertEqual(new_lines, [('some text ' + unicode(ii))])
.parametrize('data,expected', [({'hello': 'world'}, b'hello=world'), ({'number': [1, 2]}, b'number=1&number=2')])
# NOTE(review): the line above is a `@pytest.mark.parametrize` decorator whose
# `@pytest.mark` prefix appears to have been lost in extraction — confirm upstream.
def test_urlencoded_form_handler_serialize(data, expected):
    """URLEncodedFormHandler must produce identical bytes via sync and async serialization."""
    handler = media.URLEncodedFormHandler()
    assert (handler.serialize(data, falcon.MEDIA_URLENCODED) == expected)
    value = falcon.async_to_sync(handler.serialize_async, data, falcon.MEDIA_URLENCODED)
    assert (value == expected)
class OptionSeriesBubbleDataEvents(Options):
    """Event-hook options for bubble-series data points (click, drag, select, ...).

    Each hook appears twice as a paired getter/setter definition.
    NOTE(review): the original almost certainly used `@property` /
    `@<name>.setter` decorators that were lost in extraction — as written, the
    second `def` of each pair simply shadows the first. Confirm against the
    original source before relying on getter behavior.
    """

    def click(self):
        return self._config_get(None)

    def click(self, value: Any):
        self._config(value, js_type=False)

    def drag(self):
        return self._config_get(None)

    def drag(self, value: Any):
        self._config(value, js_type=False)

    def dragStart(self):
        return self._config_get(None)

    def dragStart(self, value: Any):
        self._config(value, js_type=False)

    def drop(self):
        return self._config_get(None)

    def drop(self, value: Any):
        self._config(value, js_type=False)

    def mouseOut(self):
        return self._config_get(None)

    def mouseOut(self, value: Any):
        self._config(value, js_type=False)

    def mouseOver(self):
        return self._config_get(None)

    def mouseOver(self, value: Any):
        self._config(value, js_type=False)

    def remove(self):
        return self._config_get(None)

    def remove(self, value: Any):
        self._config(value, js_type=False)

    def select(self):
        return self._config_get(None)

    def select(self, value: Any):
        self._config(value, js_type=False)

    def unselect(self):
        return self._config_get(None)

    def unselect(self, value: Any):
        self._config(value, js_type=False)

    def update(self):
        return self._config_get(None)

    def update(self, value: Any):
        self._config(value, js_type=False)
def extractMintmatchalatteBlogspotCom(item):
    """Parser for feed items from mintmatchalatte.blogspot.com.

    Returns a release message for known tags, None for previews or items
    without a volume/chapter number, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    has_number = chp or vol
    if not has_number or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class TestIndexListFilterAlias(TestCase):
    """Tests for IndexList.filter_by_alias against a mocked Elasticsearch client."""

    def builder(self, key='2'):
        """Build an IndexList over a fully mocked client using test fixture *key*."""
        self.client = Mock()
        self.client.info.return_value = get_es_ver()
        self.client.cat.indices.return_value = get_testvals(key, 'state')
        self.client.indices.get_settings.return_value = get_testvals(key, 'settings')
        self.client.indices.stats.return_value = get_testvals(key, 'stats')
        self.client.indices.exists_alias.return_value = False
        self.ilo = IndexList(self.client)

    def test_raise(self):
        """Calling filter_by_alias without aliases must raise MissingArgument."""
        self.builder(key='1')
        self.assertRaises(MissingArgument, self.ilo.filter_by_alias)

    def test_positive(self):
        """Indices carrying the alias are retained."""
        self.builder()
        self.client.indices.get_alias.return_value = testvars.settings_2_get_aliases
        self.ilo.filter_by_alias(aliases=['my_alias'])
        self.assertEqual(sorted(list(testvars.settings_two.keys())), sorted(self.ilo.indices))

    def test_negative(self):
        """When no index carries the alias, the list is emptied."""
        self.builder()
        self.client.indices.get_alias.return_value = {}
        self.ilo.filter_by_alias(aliases=['not_my_alias'])
        self.assertEqual(sorted([]), sorted(self.ilo.indices))

    def test_get_alias_raises(self):
        """A get_alias failure empties the list.

        Note: side_effect takes precedence over return_value on a Mock, so the
        configured return_value is never used here.
        """
        self.builder()
        self.client.indices.get_alias.side_effect = testvars.get_alias_fail
        self.client.indices.get_alias.return_value = testvars.settings_2_get_aliases
        self.ilo.filter_by_alias(aliases=['my_alias'])
        self.assertEqual(sorted([]), sorted(self.ilo.indices))
def remove_old_backups(bpath):
    """Trim the backup directory down to the configured number of version backups.

    The currently-installed version is never removed, even if it is the oldest
    backup on disk.
    """
    max_backups = get_setting_int('backups', 4)
    versions = sorted(parse_version(version) for version in listdir(bpath))
    if len(versions) < 2:
        return
    try:
        installed_version = load_widevine_config()['version']
    except TypeError:
        log(2, 'could not determine installed version. Aborting cleanup of old versions.')
        return
    while len(versions) > max_backups + 1:
        # Delete the oldest backup, skipping over the installed version.
        if versions[0] == parse_version(installed_version):
            remove_version = str(versions[1])
        else:
            remove_version = str(versions[0])
        log(0, 'Removing oldest backup which is not installed: {version}', version=remove_version)
        remove_tree(os.path.join(bpath, remove_version))
        versions = sorted(parse_version(version) for version in listdir(bpath))
    return
class Result():
    """Post-state result of executing a batch of transactions (t8n-style output).

    NOTE(review): the bare annotated fields below look like a `@dataclass`
    whose decorator was lost in extraction — as written they are class-level
    annotations only. Confirm upstream.
    """
    difficulty: Any
    base_fee: Any
    state_root: Any = None
    tx_root: Any = None
    receipt_root: Any = None
    withdrawals_root: Any = None
    logs_hash: Any = None
    bloom: Any = None
    receipts: Any = None
    rejected: Any = None
    gas_used: Any = None

    def to_json(self) -> Any:
        """Serialize to the JSON layout expected by t8n consumers.

        Hash-like fields are 0x-prefixed hex strings; numeric fields are hex.
        Assumes state_root, tx_root, receipt_root, logs_hash and bloom are set
        (their .hex() is called unconditionally) — confirm with callers.
        """
        data = {}
        data['stateRoot'] = ('0x' + self.state_root.hex())
        data['txRoot'] = ('0x' + self.tx_root.hex())
        data['receiptsRoot'] = ('0x' + self.receipt_root.hex())
        # Withdrawals root only exists post-Shanghai.
        if self.withdrawals_root:
            data['withdrawalsRoot'] = ('0x' + self.withdrawals_root.hex())
        data['logsHash'] = ('0x' + self.logs_hash.hex())
        data['logsBloom'] = ('0x' + self.bloom.hex())
        data['gasUsed'] = hex(self.gas_used)
        if self.difficulty:
            data['currentDifficulty'] = hex(self.difficulty)
        else:
            data['currentDifficulty'] = None
        if self.base_fee:
            data['currentBaseFee'] = hex(self.base_fee)
        else:
            data['currentBaseFee'] = None
        data['rejected'] = [{'index': idx, 'error': error} for (idx, error) in self.rejected.items()]
        data['receipts'] = [{'transactionHash': item['transactionHash'], 'gasUsed': item['gasUsed']} for item in self.receipts]
        return data
class Migration(migrations.Migration):
    """Add current-congressional-district columns to award and transaction search tables."""
    dependencies = [('search', '0031_add_iija_spending_fields')]
    operations = [migrations.AddField(model_name='awardsearch', name='pop_congressional_code_current', field=models.TextField(null=True)), migrations.AddField(model_name='awardsearch', name='recipient_location_congressional_code_current', field=models.TextField(null=True)), migrations.AddField(model_name='transactionsearch', name='pop_congressional_code_current', field=models.TextField(null=True)), migrations.AddField(model_name='transactionsearch', name='recipient_location_congressional_code_current', field=models.TextField(null=True))]
def timeRange(start: Time_t, end: Time_t, step: float) -> Iterator[dt.datetime]:
    """Yield wall-clock time points from *start* to *end*, *step* seconds apart,
    sleeping until each point is reached before yielding it.

    Timezone awareness follows *start*: if it carries tzinfo, "now" is taken
    in UTC, otherwise naive local time is used.
    """
    assert (step > 0)
    delta = dt.timedelta(seconds=step)
    t = _fillDate(start)
    tz = (dt.timezone.utc if t.tzinfo else None)
    now = dt.datetime.now(tz)
    # Skip over time points that are already in the past.
    while (t < now):
        t += delta
    while (t <= _fillDate(end)):
        # Block until wall-clock time t, then yield it.
        waitUntil(t)
        (yield t)
        t += delta
class ShipitPathMap(object):
    """Maps paths in an fbsource checkout to a destination tree, mimicking ShipIt."""

    def __init__(self) -> None:
        self.roots = []  # source directories to walk
        self.mapping = []  # (source_prefix, dest_prefix) rewrite rules
        self.exclusion = []  # compiled regexes of source paths to skip

    def add_mapping(self, fbsource_dir, target_dir) -> None:
        """Map files under fbsource_dir into target_dir of the destination."""
        self.roots.append(fbsource_dir)
        self.mapping.append((fbsource_dir, target_dir))

    def add_exclusion(self, pattern) -> None:
        """Exclude source paths matching the given regex pattern."""
        self.exclusion.append(re.compile(pattern))

    def _minimize_roots(self) -> None:
        """Drop roots that are nested inside another root, so each file is walked once."""
        self.roots.sort(key=len)
        minimized = []
        for r in self.roots:
            add_this_entry = True
            for existing in minimized:
                if r.startswith((existing + '/')):
                    add_this_entry = False
                    break
            if add_this_entry:
                minimized.append(r)
        self.roots = minimized

    def _sort_mapping(self) -> None:
        """Order rules longest-prefix-first so the most specific mapping wins."""
        self.mapping.sort(reverse=True, key=(lambda x: len(x[0])))

    def _map_name(self, norm_name, dest_root):
        """Map a normalized source path to its destination path.

        Returns None for excluded/uninteresting files; raises if no rule matches.
        """
        if (norm_name.endswith('.pyc') or norm_name.endswith('.swp')):
            # Ignore compiled python and editor swap files.
            return None
        for excl in self.exclusion:
            if excl.match(norm_name):
                return None
        for (src_name, dest_name) in self.mapping:
            if ((norm_name == src_name) or norm_name.startswith((src_name + '/'))):
                rel_name = os.path.relpath(norm_name, src_name)
                rel_name = os.path.normpath(rel_name)
                if (dest_name == '.'):
                    # Map to the destination root itself.
                    return os.path.normpath(os.path.join(dest_root, rel_name))
                dest_name = os.path.normpath(dest_name)
                return os.path.normpath(os.path.join(dest_root, dest_name, rel_name))
        raise Exception(('%s did not match any rules' % norm_name))

    def mirror(self, fbsource_root, dest_root) -> ChangeStatus:
        """Copy all mapped files into dest_root and delete previously shipped
        files that no longer exist; returns the accumulated ChangeStatus."""
        self._minimize_roots()
        self._sort_mapping()
        change_status = ChangeStatus()
        full_file_list = set()
        # On Windows st_dev is unreliable; pretend everything is one device.
        if (sys.platform == 'win32'):
            def st_dev(path):
                return 1
        else:
            def st_dev(path):
                return os.lstat(path).st_dev
        for fbsource_subdir in self.roots:
            dir_to_mirror = os.path.join(fbsource_root, fbsource_subdir)
            root_dev = st_dev(dir_to_mirror)
            prefetch_dir_if_eden(dir_to_mirror)
            if (not os.path.exists(dir_to_mirror)):
                raise Exception(("%s doesn't exist; check your sparse profile!" % dir_to_mirror))
            for (root, dirs, files) in os.walk(dir_to_mirror):
                # Prune subdirs on other devices (mount points) from the walk.
                dirs[:] = [d for d in dirs if (root_dev == st_dev(os.path.join(root, d)))]
                for src_file in files:
                    full_name = os.path.join(root, src_file)
                    rel_name = os.path.relpath(full_name, fbsource_root)
                    norm_name = rel_name.replace('\\', '/')
                    target_name = self._map_name(norm_name, dest_root)
                    if target_name:
                        full_file_list.add(target_name)
                        if copy_if_different(full_name, target_name):
                            change_status.record_change(target_name)
        # The manifest of previously shipped files lets us remove stale ones.
        installed_name = os.path.join(dest_root, '.shipit_shipped')
        if os.path.exists(installed_name):
            with open(installed_name, 'rb') as f:
                for name in f.read().decode('utf-8').splitlines():
                    name = name.strip()
                    if (name not in full_file_list):
                        print(('Remove %s' % name))
                        os.unlink(name)
                        change_status.record_change(name)
        with open(installed_name, 'wb') as f:
            for name in sorted(list(full_file_list)):
                f.write(('%s\n' % name).encode('utf-8'))
        return change_status
def main():
    """raylib [shapes] example: draw a cubic-bezier line between two mouse-defined points."""
    screen_width, screen_height = 800, 450
    set_config_flags(ConfigFlags.FLAG_MSAA_4X_HINT)
    init_window(screen_width, screen_height, 'raylib [shapes] example - cubic-bezier lines')
    start_point = Vector2(0, 0)
    end_point = Vector2(screen_width, screen_height)
    set_target_fps(60)
    while not window_should_close():
        # Left click moves the start point, right click moves the end point.
        if is_mouse_button_down(MouseButton.MOUSE_BUTTON_LEFT):
            start_point = get_mouse_position()
        if is_mouse_button_down(MouseButton.MOUSE_BUTTON_RIGHT):
            end_point = get_mouse_position()
        begin_drawing()
        clear_background(RAYWHITE)
        draw_text('USE MOUSE LEFT-RIGHT CLICK to DEFINE LINE START and END POINTS', 15, 20, 20, GRAY)
        draw_line_bezier(start_point, end_point, 2.0, RED)
        end_drawing()
    close_window()
class BulletPointDescriptor(TypeDescriptor[Iterable[str]]):
    """Render a schema tree as an indented bullet-point listing."""

    def visit_default(self, node: 'AbstractSchemaNode', **kwargs: Any) -> List[str]:
        """Produce one bullet line for a node, indented by its depth."""
        depth = kwargs['depth']
        space = '* ' + depth * ' '
        line = f'{space}{node.id}: {node.__class__.__name__} # {node.description}'
        return [line]

    def visit_object(self, node: Object, **kwargs: Any) -> List[str]:
        """Produce the node's own bullet followed by its attributes, one level deeper."""
        depth = kwargs['depth']
        code_lines = self.visit_default(node, depth=depth)
        for attribute in node.attributes:
            code_lines += attribute.accept(self, depth=depth + 1)
        return code_lines

    def describe(self, node: Object) -> str:
        """Return the full bullet-point description of the schema rooted at *node*."""
        return '\n'.join(node.accept(self, depth=0))
class invert_test_case(unittest.TestCase):
    """Tests for _invert, which swaps a mapping's keys and values."""

    def test_invert_with_unique_values(self):
        """Unique values invert to single-element key lists."""
        i = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
        o = _invert(i)
        r = {1: ['a'], 2: ['b'], 3: ['c'], 4: ['d'], 5: ['e']}
        self.assertEqual(o, r)

    def test_invert_with_flat_unique_values(self):
        """flat=True maps each value directly to its key instead of a list."""
        i = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
        o = _invert(i, flat=True)
        r = {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e'}
        self.assertEqual(o, r)

    def test_invert_with_multiple_values(self):
        """Duplicate values collect all their keys under one entry."""
        i = {'a': 1, 'b': 2, 'c': 3, 'd': 1, 'e': 2, 'f': 3}
        o = _invert(i)
        # FIX: the original asserted `('a' and ('d' in o[1]))`, which short-circuits
        # to only the second membership test — 'a' was never actually checked.
        # Assert both keys explicitly for each shared value.
        self.assertIn('a', o[1])
        self.assertIn('d', o[1])
        self.assertIn('b', o[2])
        self.assertIn('e', o[2])
        self.assertIn('c', o[3])
        self.assertIn('f', o[3])

    def test_invert_with_list_values(self):
        """List values fan out: every element maps back to the original key."""
        i = {'a': ['x', 'y', 'z'], 'b': ['c', 'd', 'e']}
        o = _invert(i)
        r = {'x': ['a'], 'y': ['a'], 'z': ['a'], 'c': ['b'], 'd': ['b'], 'e': ['b']}
        self.assertEqual(o, r)
        # Inverting twice should regroup the elements under the original keys.
        ii = _invert(o)
        self.assertIn('a', ii)
        self.assertIn('b', ii)
        self.assertEqual(len(ii.keys()), 2)
        self.assertIn('x', ii['a'])
        self.assertIn('y', ii['a'])
        self.assertIn('z', ii['a'])
        self.assertEqual(len(ii['a']), 3)
        self.assertIn('c', ii['b'])
        self.assertIn('d', ii['b'])
        self.assertIn('e', ii['b'])
        self.assertEqual(len(ii['b']), 3)

    def test_invert_with_tuple_values(self):
        """Tuple values behave like lists when inverted."""
        i = {'a': ('x', 'y', 'z'), 'b': ('c', 'd', 'e')}
        o = _invert(i)
        r = {'x': ['a'], 'y': ['a'], 'z': ['a'], 'c': ['b'], 'd': ['b'], 'e': ['b']}
        self.assertEqual(o, r)
(IDataViewWidget)
# NOTE(review): the bare expression above looks like a `@provides(IDataViewWidget)`
# decorator whose `@provides` prefix was lost in extraction — confirm upstream.
class DataViewWidget(MDataViewWidget, LayoutWidget):
    """Qt implementation of a data-view widget backed by a QAbstractItemView."""
    # Factory callable used to create the Qt control.
    control_factory = Callable(DataViewTreeView)
    # What a selection refers to: whole rows, whole columns, or single items.
    selection_type = Enum('row', 'column', 'item')
    # Qt-style selection mode.
    selection_mode = Enum('extended', 'none', 'single')
    # The live Qt view control.
    control = Instance(QAbstractItemView)
    # The Qt item model bridging data_model to the view.
    _item_model = Instance(QAbstractItemModel)

    def _create_item_model(self):
        """Create the DataViewItemModel that adapts data_model for Qt."""
        self._item_model = DataViewItemModel(self.data_model, self.selection_type, self.exporters)

    def _get_control_header_visible(self):
        """Return whether the view's header is currently shown."""
        return (not self.control.isHeaderHidden())

    def _set_control_header_visible(self, header_visible):
        self.control.setHeaderHidden((not header_visible))

    def _get_control_selection_type(self):
        """Translate the Qt selection behavior back to the pyface enum value."""
        qt_selection_type = self.control.selectionBehavior()
        return pyface_selection_types[qt_selection_type]

    def _set_control_selection_type(self, selection_type):
        qt_selection_type = qt_selection_types[selection_type]
        self.control.setSelectionBehavior(qt_selection_type)
        # The item model also needs to know, so it can extract selections.
        self._item_model.selectionType = selection_type

    def _get_control_selection_mode(self):
        qt_selection_mode = self.control.selectionMode()
        return pyface_selection_modes[qt_selection_mode]

    def _set_control_selection_mode(self, selection_mode):
        qt_selection_mode = qt_selection_modes[selection_mode]
        self.control.setSelectionMode(qt_selection_mode)

    def _get_control_selection(self):
        """Return the current selection in a shape matching selection_type."""
        indices = self.control.selectedIndexes()
        if (self.selection_type == 'row'):
            return self._item_model._extract_rows(indices)
        elif (self.selection_type == 'column'):
            return self._item_model._extract_columns(indices)
        else:
            return self._item_model._extract_indices(indices)

    def _set_control_selection(self, selection):
        """Replace the Qt selection with the given (row, column) pairs."""
        selection_model = self.control.selectionModel()
        select_flags = QItemSelectionModel.SelectionFlag.Select
        qt_selection = QItemSelection()
        if (self.selection_type == 'row'):
            select_flags |= QItemSelectionModel.SelectionFlag.Rows
            for (row, column) in selection:
                index = self._item_model._to_model_index(row, (0,))
                qt_selection.select(index, index)
        elif (self.selection_type == 'column'):
            select_flags |= QItemSelectionModel.SelectionFlag.Columns
            for (row, column) in selection:
                index = self._item_model._to_model_index((row + (0,)), column)
                qt_selection.select(index, index)
        else:
            for (row, column) in selection:
                index = self._item_model._to_model_index(row, column)
                qt_selection.select(index, index)
        selection_model.clearSelection()
        selection_model.select(qt_selection, select_flags)

    def _observe_control_selection(self, remove=False):
        """Connect or disconnect the Qt selectionChanged signal handler."""
        selection_model = self.control.selectionModel()
        if remove:
            try:
                selection_model.selectionChanged.disconnect(self._update_selection)
            except (TypeError, RuntimeError):
                # Already disconnected (or control being torn down); harmless.
                logger.info('selectionChanged already disconnected')
        else:
            selection_model.selectionChanged.connect(self._update_selection)

    def _create_control(self, parent):
        """Create and configure the Qt view control and attach the item model."""
        self._create_item_model()
        control = self.control_factory(parent)
        control._widget = self
        control.setUniformRowHeights(True)
        control.setAnimated(True)
        control.setDragEnabled(True)
        control.setModel(self._item_model)
        control.setAcceptDrops(True)
        control.setDropIndicatorShown(True)
        return control

    def destroy(self):
        """Detach the model and break reference cycles before tearing down the control."""
        if (self.control is not None):
            self.control.setModel(None)
            self.control._widget = None
            self._item_model = None
        super().destroy()

    ('data_model', dispatch='ui')
    # NOTE(review): the bare tuple above looks like an `@observe('data_model', ...)`
    # decorator whose `@observe` prefix was lost in extraction — confirm upstream.
    def _update_item_model(self, event):
        if (self._item_model is not None):
            self._item_model.model = event.new

    ('exporters.items', dispatch='ui')
    # NOTE(review): likely `@observe('exporters.items', ...)` — see note above.
    def _update_exporters(self, event):
        if (self._item_model is not None):
            self._item_model.exporters = self.exporters
(python=PYTHON_VERSIONS)
# NOTE(review): the line above looks like a `@nox.session(python=PYTHON_VERSIONS)`
# decorator whose prefix was lost in extraction — confirm upstream.
def test_tools(session: Session) -> None:
    """Install hydra and run the test suite of every tool under tools/."""
    _upgrade_basic(session)
    install_cmd = ['pip', 'install']
    session.install('pytest')
    install_hydra(session, install_cmd)
    # NOTE(review): os.path.isfile(x) tests the bare entry name relative to the
    # CWD rather than BASE/tools/x — presumably intended to skip files; confirm.
    tools = [x for x in sorted(os.listdir(os.path.join(BASE, 'tools'))) if (not os.path.isfile(x))]
    for tool in tools:
        tool_path = os.path.join('tools', tool)
        session.chdir(BASE)
        # Tools with their own setup.py get an editable install first.
        if (Path(tool_path) / 'setup.py').exists():
            cmd = (list(install_cmd) + ['-e', tool_path])
            session.run(*cmd, silent=SILENT)
        session.run('pytest', tool_path)
    session.chdir(BASE)
def fix_event_and_speaker_images():
    """Queue async resize tasks for events and speakers missing derived image sizes.

    Selects rows that have an original image but lack at least one of the
    generated sizes, then dispatches the corresponding Celery task per row.
    """
    events = Event.query.filter(Event.original_image_url.isnot(None), or_((Event.thumbnail_image_url == None), (Event.large_image_url == None), (Event.icon_image_url == None))).all()
    logger.info('Resizing images of %s events...', len(events))
    for event in events:
        logger.info('Resizing Event %s', event.id)
        resize_event_images_task.delay(event.id, event.original_image_url)
    speakers = Speaker.query.filter(Speaker.photo_url.isnot(None), or_((Speaker.icon_image_url == None), (Speaker.small_image_url == None), (Speaker.thumbnail_image_url == None))).all()
    logger.info('Resizing images of %s speakers...', len(speakers))
    for speaker in speakers:
        # FIX: was `logging.info`, which bypassed the module-level logger used
        # everywhere else in this function.
        logger.info('Resizing Speaker %s', speaker.id)
        resize_speaker_images_task.delay(speaker.id, speaker.photo_url)
def run(url_suffix='', retry=0):
    """Dispatch an add-on action based on the invoking URL.

    NOTE(review): `url_suffix` and `retry` are unused here, and the URL is read
    from sys.argv[0] — this looks like a Kodi plugin entry point where argv[0]
    carries the plugin:// route; confirm against the caller.
    """
    url = sys.argv[0]
    log.debug(('Running action: %s' % url))
    if ('/clear_cookies' in url):
        client = Client()
        cookies = client._locate_cookies()
        log.info(('Removing cookies from %s' % cookies))
        if os.path.isfile(cookies):
            os.remove(cookies)
            log.info('Successfully removed cookies file')
class Migration(migrations.Migration):
    """Adjust user profile fields: level choices, nullable phone and timezone."""
    dependencies = [('users', '0002_auto__1548')]
    operations = [migrations.AlterField(model_name='userprofile', name='current_level', field=models.IntegerField(choices=[(1, 'Level One'), (2, 'Level Two')], default=1)), migrations.AlterField(model_name='userprofile', name='phone', field=models.CharField(max_length=50, null=True)), migrations.AlterField(model_name='userprofile', name='timezone', field=models.CharField(max_length=50, null=True))]
class TestLayout(BaseTestMixin, unittest.TestCase):
    """Qt layout tests for resizable items inside V/H groups."""

    def setUp(self):
        BaseTestMixin.setUp(self)

    def tearDown(self):
        BaseTestMixin.tearDown(self)

    _toolkit([ToolkitName.qt])
    # NOTE(review): the bare call above (and its two repeats below) looks like a
    # `@requires_toolkit([ToolkitName.qt])` decorator whose prefix was lost in
    # extraction — confirm upstream.
    def test_qt_resizable_in_vgroup(self):
        """In a VGroup, a resizable text item stretches wide but stays short."""
        with reraise_exceptions(), create_ui(VResizeDialog()) as ui:
            (editor,) = ui.get_editors('txt')
            text = editor.control
            self.assertGreater(text.width(), (_DIALOG_WIDTH - 100))
            self.assertLess(text.height(), 100)

    _toolkit([ToolkitName.qt])
    def test_qt_resizable_in_hgroup(self):
        """In an HGroup, a resizable text item stretches tall."""
        with reraise_exceptions(), create_ui(HResizeDialog()) as ui:
            (editor,) = ui.get_editors('txt')
            text = editor.control
            self.assertGreater(text.height(), (_DIALOG_HEIGHT - 100))

    _toolkit([ToolkitName.qt])
    def test_qt_resizable_readonly_item(self):
        """A resizable readonly item fills the dialog width but not its height."""
        tester = UITester()
        with tester.create_ui(ObjectWithResizeReadonlyItem()) as ui:
            resizable_readonly_item = tester.find_by_name(ui, 'resizable_readonly_item')
            self.assertLess(resizable_readonly_item._target.control.height(), _DIALOG_HEIGHT)
            self.assertEqual(resizable_readonly_item._target.control.width(), _DIALOG_WIDTH)
def main():
    """Entry point for the `brownie test` command: locate the project, then run pytest.

    Exits with pytest's return code when the run fails.
    """
    if any(flag in sys.argv for flag in ('-h', '--help')):
        # docopt prints usage and exits when help is requested.
        docopt(__doc__)
    project_path = project.check_for_project('.')
    if project_path is None:
        raise ProjectNotFound
    project.main._add_to_sys_path(project_path)
    # Everything after the `test` subcommand is forwarded to pytest.
    pytest_args = sys.argv[sys.argv.index('test') + 1:]
    if not pytest_args or pytest_args[0].startswith('-'):
        # No explicit test target: default to the project's tests directory.
        structure_config = _load_project_structure_config(project_path)
        tests_dir = project_path.joinpath(structure_config['tests']).as_posix()
        pytest_args.insert(0, tests_dir)
    exit_code = pytest.main(pytest_args, ['pytest-brownie'])
    if exit_code:
        sys.exit(exit_code)
def check_fix_numbering(log, releases, series_id):
    """Renumber a series' releases sequentially when chapter numbers are unusable.

    Renumbering happens when the series is force-listed in the LUT, or when a
    large fraction (>80% of at least 5 items) has neither volume nor chapter.
    Returns the (possibly mutated) releases list.
    """
    if not isinstance(series_id, str):
        log.warning('Series id is not a string: %s -> %s', series_id, type(series_id))
    assert isinstance(series_id, (str, int))
    series_id = str(series_id)
    conf = load_lut()
    must_renumber = series_id in conf['force_sequential_numbering']
    # IDIOM: replaced the manual counter loop with a generator-expression sum.
    missing_chap = sum(1 for item in releases if not (item['vol'] or item['chp']))
    if releases:
        unnumbered = (missing_chap / len(releases)) * 100
        if (len(releases) >= 5 and unnumbered > 80) or must_renumber:
            if must_renumber:
                log.warning('Item numbering force-overridden! Adding simple sequential chapter numbers.')
            else:
                log.warning('Item seems to not have numbered chapters. Adding simple sequential chapter numbers.')
            # IDIOM: enumerate(..., start=1) replaces the hand-maintained counter.
            for chap, item in enumerate(releases, start=1):
                item['vol'] = None
                item['chp'] = chap
    return releases
('flytekit.core.data_persistence.FileAccessProvider.put_data')
# NOTE(review): the line above looks like a `@mock.patch(...)` decorator whose
# `@mock.patch` prefix was lost in extraction — confirm upstream.
def test_optional_flytefile_in_dataclassjsonmixin(mock_upload_dir):
    """Round-trip a dataclass holding optional FlyteFile fields through the
    DataclassTransformer and check both the literal and the reconstructed value.

    NOTE(review): h_prime/i_prime are asserted below but not passed to the
    constructor — presumably they carry dataclass defaults (None / a=99);
    confirm against the dataclass definition.
    """
    remote_path = 's3://tmp/file'
    mock_upload_dir.return_value = remote_path
    with tempfile.TemporaryFile() as f:
        f.write(b'abc')
        f1 = FlyteFile('f1', remote_path=remote_path)
        o = TestFileStruct_optional_flytefile(a=f1, b=f1, b_prime=None, c=f1, d=[f1], e=[f1], e_prime=[None], f={'a': f1}, g={'a': f1}, g_prime={'a': None}, h=f1, i=A_optional_flytefile(a=42))
        ctx = FlyteContext.current_context()
        tf = DataclassTransformer()
        lt = tf.get_literal_type(TestFileStruct_optional_flytefile)
        lv = tf.to_literal(ctx, o, TestFileStruct_optional_flytefile, lt)
        # The serialized literal must carry the remote path for every set field
        # and None / null_value markers for the optional ones.
        assert (lv.scalar.generic['a'].fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['b'].fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['b_prime'] is None)
        assert (lv.scalar.generic['c'].fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['d'].values[0].struct_value.fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['e'].values[0].struct_value.fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['e_prime'].values[0].WhichOneof('kind') == 'null_value')
        assert (lv.scalar.generic['f']['a'].fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['g']['a'].fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['g_prime']['a'] is None)
        assert (lv.scalar.generic['h'].fields['path'].string_value == remote_path)
        assert (lv.scalar.generic['h_prime'] is None)
        assert (lv.scalar.generic['i'].fields['a'].number_value == 42)
        assert (lv.scalar.generic['i_prime'].fields['a'].number_value == 99)
        # Deserializing must reproduce the original structure with the remote
        # sources attached to each file.
        ot = tf.to_python_value(ctx, lv=lv, expected_python_type=TestFileStruct_optional_flytefile)
        assert (o.a.path == ot.a.remote_source)
        assert (o.b.path == ot.b.remote_source)
        assert (ot.b_prime is None)
        assert (o.c.path == ot.c.remote_source)
        assert (o.d[0].path == ot.d[0].remote_source)
        assert (o.e[0].path == ot.e[0].remote_source)
        assert (o.e_prime == [None])
        assert (o.f['a'].path == ot.f['a'].remote_source)
        assert (o.g['a'].path == ot.g['a'].remote_source)
        assert (o.g_prime == {'a': None})
        assert (o.h.path == ot.h.remote_source)
        assert (ot.h_prime is None)
        assert (o.i == ot.i)
        assert (o.i_prime == A_optional_flytefile(a=99))
def select(read_list, write_list, error_list, timeout=None):
    """Green (cooperative) replacement for ``select.select``.

    Blocks the calling greenlet until at least one object in *read_list*
    or *write_list* becomes ready, or until *timeout* seconds elapse.
    Returns the usual ``(readable, writable, errored)`` triple; on
    timeout all three lists are empty.  *error_list* entries are mapped
    but no error listener is registered, so they are never reported by
    this implementation.
    """
    if (timeout is not None):
        try:
            timeout = float(timeout)
        except ValueError:
            # Mirror stdlib select's behaviour for non-numeric timeouts.
            raise TypeError('Expected number for timeout')
    hub = get_hub()
    timers = []
    current = eventlet.getcurrent()
    # Blocking inside the hub greenlet would deadlock the event loop.
    assert (hub.greenlet is not current), 'do not call blocking functions from the mainloop'
    # Map fileno -> {'read': obj, 'write': obj, 'error': obj} so the
    # original objects (not raw fds) can be handed back to the caller.
    ds = {}
    for r in read_list:
        ds[get_fileno(r)] = {'read': r}
    for w in write_list:
        ds.setdefault(get_fileno(w), {})['write'] = w
    for e in error_list:
        ds.setdefault(get_fileno(e), {})['error'] = e
    listeners = []
    def on_read(d):
        # Resume the waiting greenlet, reporting the readable object.
        original = ds[get_fileno(d)]['read']
        current.switch(([original], [], []))
    def on_write(d):
        # Resume the waiting greenlet, reporting the writable object.
        original = ds[get_fileno(d)]['write']
        current.switch(([], [original], []))
    def on_timeout2():
        # Resume with empty lists: nothing became ready in time.
        current.switch(([], [], []))
    def on_timeout():
        # Defer the actual wakeup to a fresh 0-delay timer so the switch
        # happens on the next hub pass rather than inside this callback.
        timers.append(hub.schedule_call_global(0, on_timeout2))
    if (timeout is not None):
        timers.append(hub.schedule_call_global(timeout, on_timeout))
    try:
        for (k, v) in ds.items():
            if v.get('read'):
                listeners.append(hub.add(hub.READ, k, on_read, current.throw, (lambda : None)))
            if v.get('write'):
                listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, (lambda : None)))
        try:
            # Yield to the hub; one of the callbacks above switches us
            # back with the result triple.
            return hub.switch()
        finally:
            for l in listeners:
                hub.remove(l)
    finally:
        for t in timers:
            t.cancel()
class Solution(object):
    def checkPossibility(self, nums):
        """Return True if nums can become non-decreasing by modifying at
        most one element.

        On the first descent at index i we try both single-element
        repairs -- raising nums[i] to its predecessor, or lowering
        nums[i-1] to nums[i] -- and accept if either repaired copy is
        already sorted.
        """
        def _non_decreasing(values):
            # True when values never step downward.
            prev = None
            for value in values:
                if prev is not None and value < prev:
                    return False
                prev = value
            return True
        prev = None
        for idx, value in enumerate(nums):
            if prev is not None and value < prev:
                raised = list(nums)
                raised[idx] = prev
                lowered = list(nums)
                lowered[idx - 1] = value
                return _non_decreasing(raised) or _non_decreasing(lowered)
            prev = value
        return True
.django_db
def test_tas_with_no_program_activity(client, monkeypatch, tas_with_no_object_class, helpers):
helpers.mock_current_fiscal_year(monkeypatch)
tas = '001-X-0000-000'
resp = client.get(url.format(tas=tas, query_params=''))
expected_result = {'fiscal_year': helpers.get_mocked_current_fiscal_year(), 'treasury_account_symbol': tas, 'messages': [], 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'limit': 10, 'next': None, 'page': 1, 'previous': None, 'total': 0}, 'results': []}
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.json() == expected_result) |
def test_non_utf_8_body_in_ignored_paths_with_capture_body(app, elasticapm_client):
    """A non-UTF-8 POST body to an ignored URL must record no transaction,
    even with capture_body='all'."""
    http = TestClient(app)
    elasticapm_client.config.update(1, capture_body='all', transaction_ignore_urls='/hello')
    response = http.post('/hello', data=b'b$\x19\xc2')
    assert response.status_code == 200
    # The URL is ignored, so no transaction event may have been captured.
    assert len(elasticapm_client.events[constants.TRANSACTION]) == 0
def write_json_files(stubs_dict):
    """Serialize each stub mapping in *stubs_dict* to a JSON file.

    Keys are file names (written under the module-level BUILD_DIR);
    ``bytes`` values are stored as base64-encoded ASCII strings.
    """
    class _Base64BytesEncoder(json.JSONEncoder):
        # json cannot encode bytes natively; emit them as base64 text.
        def default(self, value):
            if isinstance(value, bytes):
                return base64.b64encode(value).decode('ascii')
            return super().default(value)
    for filename, stub_data in stubs_dict.items():
        target = os.path.join(BUILD_DIR, filename)
        with open(target, 'w') as outfile:
            json.dump(stub_data, outfile, cls=_Base64BytesEncoder, indent=4)
def test_missing_cursor_value(response_with_body):
    """A response lacking the configured cursor field yields no next page."""
    pagination_config = CursorPaginationConfiguration(cursor_param='after', field='hash')
    strategy = CursorPaginationStrategy(pagination_config)
    params: SaaSRequestParams = SaaSRequestParams(method=HTTPMethod.GET, path='/conversations')
    follow_up = strategy.get_next_request(params, {}, response_with_body, 'conversations')
    assert follow_up is None
class Solution():
    def sumRootToLeaf(self, root: Optional[TreeNode]) -> int:
        """Sum, over every root-to-leaf path, the binary number whose
        bits are the node values along the path (root is the most
        significant bit)."""
        def _walk(node, acc):
            # acc is the binary value accumulated above this node.
            if node is None:
                return acc
            acc = 2 * acc + node.val
            left, right = node.left, node.right
            if left is None and right is None:
                # Leaf: this path's value is complete.
                return acc
            if left is None:
                return _walk(right, acc)
            if right is None:
                return _walk(left, acc)
            return _walk(left, acc) + _walk(right, acc)
        return _walk(root, 0)
def longest_consecutive_sequence(seq: str, char: str) -> int:
    """Return the length of the longest unbroken run of *char* in *seq*.

    Args:
        seq: String to scan.
        char: The single character to look for.

    Returns:
        Length of the longest consecutive run of ``char`` (0 if absent).

    Raises:
        ValueError: If ``char`` is not exactly one character long.
    """
    # Validate explicitly: ``assert`` is stripped under ``python -O``,
    # so it must not be relied on for input validation.
    if len(char) != 1:
        raise ValueError('char must be exactly one character')
    longest = 0
    current_streak = 0
    for c in seq:
        # Extend the run on a match, reset it otherwise.
        current_streak = current_streak + 1 if c == char else 0
        if current_streak > longest:
            longest = current_streak
    return longest
def test_iou_score():
    """IoU of a contained quarter-area box is 0.25; non-overlapping
    (corner-touching) boxes score 0."""
    base = [(0, 0), (100, 0), (100, 100), (0, 100)]
    contained_quarter = [(50, 50), (100, 50), (100, 100), (50, 100)]
    assert keras_ocr.evaluation.iou_score(base, contained_quarter) == 0.25
    corner_touching = [(100, 100), (200, 100), (200, 200), (100, 200)]
    assert keras_ocr.evaluation.iou_score(base, corner_touching) == 0.0
def test_get_registration_stats(db, client, jwt):
    """Check-in stats for an event with one speaker and no attendees
    must be all zeros with a single empty session entry."""
    db.session.add(SpeakerFactory())
    db.session.commit()
    response = client.get('/v1/user-check-in/stats/event/1?session_ids=1', headers=jwt)
    session_stats = [{'check_in': 0, 'check_out': 0, 'manual_count': {}, 'session_id': '1', 'session_name': 'example', 'speakers': [], 'track_name': 'example'}]
    expected = {
        'session_stats': session_stats,
        'total_attendee': 0,
        'total_not_checked_in': 0,
        'total_registered': 0,
        'total_session_checked_in': 0,
        'total_session_checked_out': 0,
        'total_track_checked_in': 0,
        'total_track_checked_out': 0,
        'track_stats': [],
    }
    assert response.status_code == 200
    assert json.loads(response.data) == expected
class GaussianTimeOutSimulator(TimeOutSimulator):
    """Timeout simulator drawing per-example training durations from a
    bounded Gaussian, tracking running per-user training-time statistics
    and total elapsed FL time against a configured stopping budget.
    """
    def __init__(self, **kwargs):
        init_self_cfg(self, component_class=__class__, config_class=GaussianTimeOutSimulatorConfig, **kwargs)
        super().__init__(**kwargs)
        # Sampler for per-example durations, built from config.
        self.duration_distribution_generator = instantiate(self.cfg.duration_distribution_generator)
        # Running statistics over all users observed so far.
        self._num_users_tracked = 0
        self._num_users_succeed = 0
        self._sample_mean_per_user = 0.0
        self._sample_sec_moment_per_user = 0.0
        self._sample_var_per_user = 0.0
        # Wall-clock budget after which FL should stop.
        self._fl_stopping_time = self.cfg.fl_stopping_time
        self._fl_total_elapse_time = 0.0
    # NOTE(review): takes ``cls`` without a visible @classmethod
    # decorator; the decorator may have been stripped -- confirm.
    def _set_defaults_in_cfg(cls, cfg):
        pass
    # NOTE(review): the three accessors below read like @property
    # getters whose decorators were stripped -- confirm against the
    # original file.
    def fl_stopping_time(self):
        return self._fl_stopping_time
    def sample_mean_per_user(self):
        return self._sample_mean_per_user
    def sample_var_per_user(self):
        return self._sample_var_per_user
    def simulate_per_example_training_time(self) -> float:
        """Draw one per-example training duration from the bounded Gaussian."""
        return self.duration_distribution_generator.bounded_gaussian_sample()
    def simulate_training_time(self, device_perf: float, num_samples: int) -> float:
        """Per-user training time: per-example cost times sample count,
        capped at the per-round timeout wall."""
        return min([(device_perf * num_samples), self.cfg.timeout_wall_per_round])
    def track_training_time_distribution(self, one_user_training_time: float) -> None:
        """Fold one observation into the running mean / second moment,
        and (once n > 1) the unbiased sample variance."""
        self._num_users_tracked += 1
        self._sample_mean_per_user = (((self._sample_mean_per_user * (self._num_users_tracked - 1)) + one_user_training_time) / self._num_users_tracked)
        self._sample_sec_moment_per_user = (((self._sample_sec_moment_per_user * (self._num_users_tracked - 1)) + (one_user_training_time ** 2)) / self._num_users_tracked)
        if (self._num_users_tracked > 1):
            # Bessel-corrected variance from mean and second moment.
            self._sample_var_per_user = ((self._num_users_tracked / (self._num_users_tracked - 1)) * (self._sample_sec_moment_per_user - (self._sample_mean_per_user ** 2)))
    def user_timeout(self, training_time: float) -> bool:
        """True when a user's training time hits the per-round timeout wall."""
        return (training_time >= self.cfg.timeout_wall_per_round)
    def stop_fl(self) -> bool:
        """True once accumulated round time reaches the stopping budget."""
        return (self._fl_total_elapse_time >= self._fl_stopping_time)
    def track_fl_elapsed_time(self, training_time_in_round: List[float]) -> None:
        """Advance total elapsed time by the round's slowest user,
        capped at the per-round timeout wall."""
        self._fl_total_elapse_time += min([self.cfg.timeout_wall_per_round, max(training_time_in_round)])
def test_default_setup_provider_is_ansible(tmpdir):
    """A setup section without an explicit provider must fall back to
    the Ansible setup provider."""
    workdir = tmpdir.strpath
    ssh_key_path = os.path.join(workdir, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # An empty but existing key file satisfies path validation.
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(workdir, 'config.ini')
    config_text = (
        make_config_snippet('cloud', 'openstack')
        + make_config_snippet('cluster', 'example_openstack', 'setup=setup_no_ansible')
        + make_config_snippet('login', 'ubuntu', keyname='test', valid_path=ssh_key_path)
        + '\n[setup/setup_no_ansible]\nfrontend_groups = slurm_master\ncompute_groups = slurm_worker\n '
    )
    with open(config_path, 'w+') as config_file:
        config_file.write(config_text)
    creator = make_creator(config_path)
    setup = creator.create_setup_provider('example_openstack')
    from elasticluster.providers.ansible_provider import AnsibleSetupProvider
    assert isinstance(setup, AnsibleSetupProvider)
def read_data(inFile):
    """Parse a Flipper NFC dump file, validating its header, and append
    each 'Block N: ...' payload (split into per-byte hex tokens) to the
    module-level ``blk_data`` list.

    Exits the process with status 1 on a malformed or non-Mifare file.
    NOTE(review): results accumulate in the global ``blk_data`` (defined
    elsewhere in this module) rather than being returned -- confirm
    callers reset it between files.
    """
    with open(inFile, 'r', encoding='utf-8') as fd:
        # The first line must declare the expected Flipper filetype.
        line = fd.readline().strip()
        if line.startswith('Filetype:'):
            a = line.split(':', 1)
            if (not a[1].strip().startswith(FLIPPER_NFC_FILETYPE)):
                print(f'Error: {inFile} is not a Flipper NFC data file')
                if _debug:
                    print(f'>>{line}<<')
                sys.exit(1)
        else:
            print(f'Error: {inFile} is not a Flipper NFC data file')
            if _debug:
                print(f'>>>{line}<<<')
            sys.exit(1)
        for line in fd:
            line = line.strip()
            # Skip blank lines and '#' comments.
            if ((not line) or (line[0] == '#')):
                continue
            if line.startswith('Device type:'):
                # Only Mifare dumps are supported.
                a = line.split(':', 1)
                if (not a[1].strip().startswith('Mifare')):
                    print(f'Error: {inFile} is not a Mifare data file: {a[1].strip()}')
                    if _debug:
                        print(f'>>>>{line}<<<<')
                        print(f'>>>>{a[0]}<<<<')
                        print(f'>>>>{a[1]}<<<<')
                    sys.exit(1)
            if line.startswith('Block'):
                # 'Block N: aa bb cc ...' -> list of hex byte strings.
                b = line.split(':', 1)[1].strip()
                blk_data.append(b.split())
def _replace_contraction_with_var_where_possible(instruction: Instruction):
    """Collapse a contraction of a widening cast back to the original
    operand when the contraction restores the pre-widening size."""
    for outer in _find_cast_subexpressions(instruction):
        inner = outer.operand
        if not (_is_cast(inner) and outer.contraction):
            continue
        # Only a genuine contraction (outer narrower than inner) whose
        # target size matches the operand before widening qualifies.
        if outer.type.size < inner.type.size and outer.type.size == inner.operand.type.size:
            instruction.substitute(outer, inner.operand)
# NOTE(review): this bare call looks like a mangled class decorator
# (e.g. @OFPAction.register_action_type) -- confirm against upstream.
_action_type(ofproto.OFPAT_COPY_FIELD, ofproto.OFP_ACTION_COPY_FIELD_SIZE)
class OFPActionCopyField(OFPAction):
    """OpenFlow COPY_FIELD action: copy ``n_bits`` from a source field
    to a destination field, each named by an OXM id.

    Attributes mirror the wire format: n_bits, src_offset, dst_offset
    and exactly two oxm_ids (source, then destination).
    """
    def __init__(self, n_bits=0, src_offset=0, dst_offset=0, oxm_ids=None, type_=None, len_=None):
        oxm_ids = (oxm_ids if oxm_ids else [])
        super(OFPActionCopyField, self).__init__()
        self.n_bits = n_bits
        self.src_offset = src_offset
        self.dst_offset = dst_offset
        # The wire format carries exactly two OXM ids.
        assert (len(oxm_ids) == 2)
        self.oxm_ids = []
        for i in oxm_ids:
            if isinstance(i, OFPOxmId):
                # Masked ids are not meaningful here; force the flag off.
                i.hasmask = False
                self.oxm_ids.append(i)
            elif isinstance(i, six.text_type):
                self.oxm_ids.append(OFPOxmId(i, hasmask=False))
            else:
                raise ValueError(('invalid value for oxm_ids: %s' % oxm_ids))
    # NOTE(review): presumably decorated @classmethod in the original.
    def parser(cls, buf, offset):
        """Build an instance from the packed action starting at offset."""
        (type_, len_, n_bits, src_offset, dst_offset) = struct.unpack_from(ofproto.OFP_ACTION_COPY_FIELD_PACK_STR, buf, offset)
        offset += ofproto.OFP_ACTION_COPY_FIELD_SIZE
        # NOTE(review): len_ covers the whole action including the fixed
        # header consumed above, so this slice may over-read into
        # padding/following data -- verify against upstream parser.
        rest = buf[offset:(offset + len_)]
        oxm_ids = []
        while rest:
            (i, rest) = OFPOxmId.parse(rest)
            oxm_ids.append(i)
        return cls(n_bits, src_offset, dst_offset, oxm_ids, type_, len_)
    def serialize(self, buf, offset):
        """Append the packed action (header + OXM ids, padded to 8 bytes)."""
        oxm_ids_buf = b''
        for i in self.oxm_ids:
            oxm_ids_buf += i.serialize()
        action_len = (ofproto.OFP_ACTION_COPY_FIELD_SIZE + len(oxm_ids_buf))
        # Actions are padded to an 8-byte boundary.
        self.len = utils.round_up(action_len, 8)
        pad_len = (self.len - action_len)
        msg_pack_into(ofproto.OFP_ACTION_COPY_FIELD_PACK_STR, buf, offset, self.type, self.len, self.n_bits, self.src_offset, self.dst_offset)
        buf += (oxm_ids_buf + (b'\x00' * pad_len))
class TestSmithWatermanDecoder(unittest.TestCase):
    """GPU tests for SmithWatermanDecoder ('softmax' operator):
    numerical gradient check and alignment traceback decoding."""
    def setUp(self):
        # NOTE(review): ``cuda_device`` is only bound when CUDA is
        # available; without a GPU the tensor constructions below raise
        # NameError.  The tests were presumably guarded by skip
        # decorators that were lost in extraction (see the bare tuples
        # before each test method).
        if torch.cuda.is_available():
            cuda_device = torch.device('cuda')
        torch.manual_seed(2)
        (B, S, N, M) = (3, 3, 5, 5)
        self.theta = torch.rand(B, N, M, requires_grad=True, dtype=torch.float32, device=cuda_device)
        self.Ztheta = torch.rand(B, N, M, requires_grad=True, dtype=torch.float32, device=cuda_device)
        # Uniform gap penalty of -1 across the scoring matrix.
        self.A = ((- 1.0) * torch.ones_like(self.theta, dtype=torch.float32, device=cuda_device))
        (self.B, self.S, self.N, self.M) = (B, S, N, M)
        self.operator = 'softmax'
    # NOTE(review): bare tuple below looks like a stripped skip
    # decorator, e.g. @unittest.skipUnless(torch.cuda.is_available(), ...).
    (torch.cuda.is_available(), 'No GPU was detected')
    def test_grad_smithwaterman_function(self):
        """Numerical gradient check of the decoder's forward pass."""
        needle = SmithWatermanDecoder(self.operator)
        (theta, A) = (self.theta, self.A)
        theta.requires_grad_()
        gradcheck(needle, (theta, A), eps=0.1, atol=0.1, rtol=0.1)
    (torch.cuda.is_available(), 'No GPU was detected')
    def test_decoding(self):
        """Traceback over the gradient of a small fixture must recover
        the expected alignment path."""
        theta = torch.tensor(make_data().astype(np.float32), device=self.theta.device).unsqueeze(0)
        theta.requires_grad_()
        A = (0.1 * torch.ones_like(theta, dtype=torch.float32, device=self.theta.device))
        needle = SmithWatermanDecoder(self.operator)
        v = needle(theta, A)
        v.backward()
        decoded = needle.traceback(theta.grad.squeeze())
        # Keep only the (row, col) coordinates of each traceback step.
        decoded = [(x[0], x[1]) for x in decoded]
        states = [(0, 0), (0, 1), (1, 1), (2, 1), (3, 1), (4, 2), (4, 3)]
        self.assertListEqual(states, decoded)
    (torch.cuda.is_available(), 'No GPU was detected')
    def test_decoding2(self):
        """Traceback over a precomputed distance matrix must produce
        states that convert to an alignment without error."""
        X = 'HECDRKTCDESFSTKGNLRVHKLGH'
        Y = 'LKCSGCGKNFKSQYAYKRHEQTH'
        needle = SmithWatermanDecoder(self.operator)
        dm = torch.Tensor(np.loadtxt(get_data_path('dm.txt')))
        decoded = needle.traceback(dm)
        (pred_x, pred_y, pred_states) = list(zip(*decoded))
        states2alignment(np.array(pred_states), X, Y)
class OptionPlotoptionsSunburstSonificationTracksMappingLowpassFrequency(Options):
    """Generated Highcharts option wrapper for
    plotOptions.sunburst.sonification.tracks.mapping.lowpass.frequency.

    NOTE(review): each name below is defined twice (getter-like, then
    setter-like); the @property / @<name>.setter decorators appear to
    have been stripped in extraction, so as written the second def
    shadows the first -- confirm against the generated original.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the five bare expressions below read like mangled click
# decorators (@click.command('new'), @click.option(...)) -- confirm.
('new')
('--username', '-u', help='The username of the user.')
('--email', '-e', type=EmailType(), help='The email address of the user.')
('--password', '-p', help='The password of the user.')
('--group', '-g', help='The group of the user.', type=click.Choice(['admin', 'super_mod', 'mod', 'member']))
def new_user(username, email, password, group):
    """Create a new forum user from CLI options (prompting for missing
    values) and report success in cyan.

    Raises:
        FlaskBBCLIError: If the username or email is already taken.
    """
    try:
        user = prompt_save_user(username, email, password, group)
        click.secho('[+] User {} with Email {} in Group {} created.'.format(user.username, user.email, user.primary_group.name), fg='cyan')
    except IntegrityError:
        # Unique constraint on username/email -> user-facing CLI error.
        raise FlaskBBCLIError("Couldn't create the user because the username or email address is already taken.", fg='red')
class TestLocalizedDecimalField(TestCase):
    """DRF DecimalField with localize=True under a Polish locale: comma
    decimal separators for input/output, and forced string coercion.

    NOTE(review): the bare ``_settings(LANGUAGE_CODE='pl')`` calls look
    like stripped @override_settings decorators -- confirm.
    """
    _settings(LANGUAGE_CODE='pl')
    def test_to_internal_value(self):
        # Localized input uses ',' as the decimal separator.
        field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
        assert (field.to_internal_value('1,1') == Decimal('1.1'))
    _settings(LANGUAGE_CODE='pl')
    def test_to_representation(self):
        # Output is rendered with the locale's ',' separator.
        field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
        assert (field.to_representation(Decimal('1.1')) == '1,1')
    def test_localize_forces_coerce_to_string(self):
        # localize=True must override coerce_to_string=False.
        field = serializers.DecimalField(max_digits=2, decimal_places=1, coerce_to_string=False, localize=True)
        assert isinstance(field.to_representation(Decimal('1.1')), str)
# NOTE(review): the three leading calls look like stripped
# @pytest.mark.parametrize decorators -- confirm.
.parametrize('params', (['t', 'u'], ['u', 't']))
.parametrize('levels', ([500, 850], [850, 500]))
.parametrize('source_name', ['indexed-directory'])
def test_indexing_to_xarray(params, levels, source_name):
    """Selecting and ordering an indexed source must keep the expected
    field count and convert cleanly to xarray."""
    # NOTE(review): the ``date=`` value was lost in extraction (this is
    # a syntax error as written) -- restore it from the original test.
    request = dict(level=levels, variable=params, date=, time='1200')
    (ds, __tmp, total, n) = get_fixtures(source_name, {})
    ds = ds.sel(**request)
    ds = ds.order_by(level=levels, variable=params)
    assert (len(ds) == n), len(ds)
    ds.to_xarray()
class OptionPlotoptionsNetworkgraphSonificationTracksMappingLowpassFrequency(Options):
    """Generated Highcharts option wrapper for
    plotOptions.networkgraph.sonification.tracks.mapping.lowpass.frequency.

    NOTE(review): each name below is defined twice (getter-like, then
    setter-like); the @property / @<name>.setter decorators appear to
    have been stripped in extraction, so as written the second def
    shadows the first -- confirm against the generated original.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def _get_valid_extension(fname: str) -> str:
    """Return the recognised extension of *fname*, lower-cased.

    The last suffix is checked first, then the joined last two suffixes
    (to support the compound ``.hdf5.gz``).

    Raises:
        FileError: If the file has no extension or an unsupported one.
    """
    valid_extensions = ['.json', '.yaml', '.hdf5', '.h5', '.hdf5.gz']
    extensions = [s.lower() for s in pathlib.Path(fname).suffixes[(- 2):]]
    if not extensions:
        raise FileError(f"File '{fname}' missing extension.")
    # Try the single trailing extension, then the compound form.
    for candidate in (extensions[-1], ''.join(extensions)):
        if candidate in valid_extensions:
            return candidate
    raise FileError(f"File extension must be one of {', '.join(valid_extensions)}; file '{fname}' does not match any of those.")
def extractNovelfanatistWordpressCom(item):
    """Map a raw feed item to a release message, or reject it.

    Returns None for previews or titles with no vol/chapter info, False
    when no known tag matches, and the built release message otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): ``_in_both()`` looks like a stripped decorator marking
# this test to run on both loop implementations -- confirm.
_in_both()
def test_calllater():
    """Callbacks queued with call_soon fire (with their args) on the
    next loop.iter(); output is checked by the surrounding harness."""
    def x1():
        print('called later')
    def x2(i):
        print('called with', i)
    def x3(i, j):
        print('called with', i, 'and', j)
    # No-arg callbacks: both should fire on a single iteration.
    loop.call_soon(x1)
    loop.call_soon(x1)
    print('xx')
    loop.iter()
    # Single-argument callbacks.
    loop.call_soon(x2, 3)
    loop.call_soon(x2, 4)
    print('xx')
    loop.iter()
    # Two-argument callbacks.
    loop.call_soon(x3, 3, 4)
    loop.call_soon(x3, 5, 6)
    print('xx')
    loop.iter()
class ACL():
    """Applies and removes Faucet port ACLs for endpoints according to a
    Poseidon rules file, copying the rules file's included ACL documents
    into the Faucet config as 'poseidon_*' include files."""
    def __init__(self, faucetconfgetsetter):
        self.logger = logging.getLogger('acl')
        # Helper that reads/writes Faucet configuration files.
        self.frpc = faucetconfgetsetter
    def _config_file_paths(self, file_paths):
        """Resolve each config file name via the config getter."""
        return [self.frpc.config_file_path(f) for f in file_paths]
    def include_acl_files(self, rules_doc, rules_file, coprocess_rules_files, obj_doc):
        """Copy the rules file's included ACL docs next to the Faucet
        config as 'poseidon_*' include files, keep obj_doc['include'] in
        sync, and return (updated obj_doc, list of ACL names seen)."""
        files = self._config_file_paths(rules_doc['include'])
        rules_path = rules_file.rsplit('/', 1)[0]
        conf_files = obj_doc.get('include', [])
        acls_docs = {}
        for f in files:
            # Absolute paths are read as-is; relative ones are resolved
            # against the rules file's directory.
            if f.startswith('/'):
                acls_doc = self.frpc.read_faucet_conf(f)
            else:
                acls_doc = self.frpc.read_faucet_conf(os.path.join(rules_path, f))
            # read_faucet_conf signals failure by returning a bool.
            if isinstance(acls_doc, bool):
                self.logger.warning('Include file {0} was not found, ACLs may not be working as expected'.format(f))
                continue
            acls_docs[f] = acls_doc
        if conf_files:
            # Drop stale poseidon-generated includes no longer referenced.
            acls_filenames = []
            if coprocess_rules_files:
                acls_filenames += self._config_file_paths(coprocess_rules_files)
            for f in files:
                if ('/' in f):
                    acls_filenames.append(f.rsplit('/', 1)[1])
                else:
                    acls_filenames.append(f)
            for conf_file in conf_files:
                if (conf_file.startswith('poseidon') and (conf_file not in acls_filenames)):
                    obj_doc['include'].remove(conf_file)
                    self.logger.info('Removing {0} from config'.format(conf_file))
        else:
            obj_doc['include'] = []
        for (f, acls_doc) in acls_docs.items():
            if ('/' in f):
                (_, acls_filename) = f.rsplit('/', 1)
            else:
                acls_filename = f
            poseidon_acls_filename = ('poseidon_' + acls_filename)
            if (poseidon_acls_filename not in conf_files):
                obj_doc['include'].append(poseidon_acls_filename)
                self.frpc.write_faucet_conf(os.path.join(rules_path, poseidon_acls_filename), acls_doc)
                self.logger.info('Adding {0} to config'.format(acls_filename))
        acl_names = []
        for acls_doc in acls_docs.values():
            acl_names.extend(list(acls_doc.get('acls', [])))
        return (obj_doc, acl_names)
    def match_rules(self, rule, rules, obj_doc, endpoint, switch, port, all_rule_acls, force_apply_rules):
        """Evaluate one named rule group against an endpoint; when every
        sub-rule matches (or the rule is force-applied), merge its ACLs
        into the port's acls_in.  Returns (obj_doc, all_rule_acls)."""
        matches = 0
        for r in rules[rule]:
            if (('rule' in r) and ('device_key' in r['rule'])):
                rule_data = r['rule']
                if (rule_data['device_key'] == 'os'):
                    match = False
                    for addresses in ('ipv4_addresses', 'ipv6_addresses'):
                        # NOTE(review): the literal key 'addresses' is
                        # used while the loop variable ``addresses`` is
                        # never read -- this looks like it should be
                        # endpoint.metadata[addresses]; confirm upstream.
                        if ('addresses' in endpoint.metadata):
                            for (ip, ip_metadata) in endpoint.metadata['addresses']:
                                if (('os' in ip_metadata) and (ip_metadata['os'] == rule_data['value'])):
                                    self.logger.info('{0} os match: {1} {2}, rule: {3}'.format(addresses, ip, rule_data['value'], rule))
                                    match = True
                    if match:
                        matches += 1
                elif (rule_data['device_key'] == 'role'):
                    match = False
                    if ('mac_addresses' in endpoint.metadata):
                        for (mac, mac_metadata) in endpoint.metadata['mac_addresses'].items():
                            # Pick the newest timestamped record for this MAC.
                            most_recent = 0
                            for record in mac_metadata:
                                if (float(record) > most_recent):
                                    most_recent = float(record)
                            most_recent = str(most_recent)
                            if ((most_recent != '0') and (most_recent in mac_metadata) and ('labels' in mac_metadata[most_recent]) and ('confidences' in mac_metadata[most_recent])):
                                # Check the top-3 predicted role labels.
                                for i in range(3):
                                    if (mac_metadata[most_recent]['labels'][i] == rule_data['value']):
                                        # NOTE(review): membership test is on
                                        # the *value string*; presumably meant
                                        # ``'min_confidence' in rule_data`` --
                                        # confirm.
                                        if ('min_confidence' in rule_data['value']):
                                            if ((float(mac_metadata[most_recent]['confidences'][i]) * 100) >= rule_data['min_confidence']):
                                                self.logger.info('Confidence match: {0} {1}, rule: {2}'.format(mac, (float(mac_metadata[most_recent]['confidences'][i]) * 100), rule))
                                                match = True
                                        else:
                                            self.logger.info('Role match: {0} {1}, rule: {2}'.format(mac, rule_data['value'], rule))
                                            match = True
                    if match:
                        matches += 1
        # Apply when every sub-rule matched, or the rule is forced.
        if ((matches == len(rules[rule])) or (force_apply_rules and (rule in force_apply_rules))):
            rule_acls = []
            for r in rules[rule]:
                rule_acls += r['rule']['acls']
                all_rule_acls += r['rule']['acls']
            rule_acls = list(set(rule_acls))
            interfaces_conf = obj_doc['dps'][switch]['interfaces']
            if rule_acls:
                if (port not in interfaces_conf):
                    interfaces_conf[port] = {}
                port_conf = interfaces_conf[port]
                if ('acls_in' not in port_conf):
                    self.logger.info('All rules met for: {0} on switch: {1} and port: {2}; applying ACLs: {3}'.format(endpoint.endpoint_data['mac'], switch, port, rule_acls))
                    port_conf['acls_in'] = rule_acls
                else:
                    # Merge with ACLs already on the port, de-duplicated.
                    orig_rule_acls = rule_acls
                    rule_acls += port_conf['acls_in']
                    rule_acls = list(set(rule_acls))
                    if (port_conf['acls_in'] != rule_acls):
                        port_conf['acls_in'] = rule_acls
                        self.logger.info('All rules met for: {0} on switch: {1} and port: {2}; applying ACLs: {3}'.format(endpoint.endpoint_data['mac'], switch, port, orig_rule_acls))
        return (obj_doc, all_rule_acls)
    def apply_acls(self, rules_file, endpoints, force_apply_rules, force_remove_rules, coprocess_rules_files, obj_doc, rules_doc):
        """Apply rule-driven ACLs to every endpoint's switch port and
        strip rule-managed ACLs that no longer apply.  Returns the
        updated obj_doc."""
        if (not endpoints):
            return obj_doc
        if ('include' not in rules_doc):
            self.logger.info('No included ACLs files in the rules file, using ACLs that Faucet already knows about')
        else:
            (obj_doc, acl_names) = self.include_acl_files(rules_doc, rules_file, coprocess_rules_files, obj_doc)
        if ('rules' in rules_doc):
            # Collect every ACL name referenced by any rule.
            acls = []
            rules = rules_doc['rules']
            for rule in rules:
                for r in rules[rule]:
                    acls += r['rule']['acls']
            acls = list(set(acls))
            if ('include' in rules_doc):
                # Warn about ACL names not found in any included file.
                for acl in acls:
                    if (acl not in acl_names):
                        self.logger.info('Using named ACL: {0}, but it was not found in included ACL files, assuming ACL name exists in Faucet config'.format(acl))
            for endpoint in endpoints:
                port = int(endpoint.endpoint_data['port'])
                switch = endpoint.endpoint_data['segment']
                switch_conf = obj_doc['dps'].get(switch, None)
                if (not switch_conf):
                    continue
                port_conf = switch_conf['interfaces'].get(port, None)
                if (not port_conf):
                    continue
                existing_acls = port_conf.get('acls_in', None)
                if (not existing_acls):
                    continue
                all_rule_acls = []
                for rule in rules:
                    (obj_doc, all_rule_acls) = self.match_rules(rule, rules, obj_doc, endpoint, switch, port, all_rule_acls, force_apply_rules)
                all_rule_acls = list(set(all_rule_acls))
                # Remove rule-managed ACLs that no longer match (or are
                # force-removed); leave non-rule ACLs untouched.
                for acl in existing_acls:
                    if ((acl in acls) and ((acl not in all_rule_acls) or (acl in force_remove_rules))):
                        port_conf['acls_in'].remove(acl)
                        self.logger.info('Removing no longer needed ACL: {0} for: {1} on switch: {2} and port: {3}'.format(acl, endpoint.endpoint_data['mac'], switch, port))
        return obj_doc
# NOTE(review): the bare tuple below looks like a stripped
# @mock.patch('sys.argv', ['flakehell']) decorator -- confirm.
('sys.argv', ['flakehell'])
def test_exceptions(capsys, tmp_path: Path):
    """Per-path exceptions: F401 is disabled under tests/ but still
    reported for top-level files; F821 is reported everywhere."""
    text = '\n    [tool.flakehell.plugins]\n    pyflakes = ["+*"]\n\n    [tool.flakehell.exceptions."tests/"]\n    pyflakes = ["-F401"]\n    '
    (tmp_path / 'pyproject.toml').write_text(dedent(text))
    # example.py triggers both F401 (unused import) and F821 (undef name);
    # the copy under tests/ should only report F821.
    (tmp_path / 'example.py').write_text('import sys\na')
    (tmp_path / 'tests').mkdir()
    ((tmp_path / 'tests') / 'test_example.py').write_text('import sys\na')
    with chdir(tmp_path):
        result = main(['lint', '--format', 'default'])
    assert (result == (1, ''))
    captured = capsys.readouterr()
    assert (captured.err == '')
    exp = "\n    ./example.py:1:1: F401 'sys' imported but unused\n    ./example.py:2:1: F821 undefined name 'a'\n    ./tests/test_example.py:2:1: F821 undefined name 'a'\n    "
    assert (captured.out.strip() == dedent(exp).strip())
class Schedule(object):
    """Per-row event scheduler: events carry a fractional time ``t`` in
    [0, 1] within the current row; logic() plays one row, sleeping
    between events according to ctx.speed, and carries events with
    t > 1.0 over to the next row."""
    def __init__(self, ctx):
        self.ctx = ctx
        self.events = []
        # Fraction of the current row already played.
        self.passed = 0.0
        self.clock = 0.0
        self.last_clock = 0
        self.started = False
    def pending(self):
        """Number of events still queued."""
        return len(self.events)
    def add(self, e):
        self.events.append(e)
    def clear(self):
        # NOTE(review): dead code -- the assert makes the reset below
        # unreachable; presumably disabled on purpose. Confirm.
        assert False
        self.events = []
    def clear_channel(self, ch):
        # NOTE(review): dead code, same situation as clear().
        assert False
        self.events = [ev for ev in self.events if (ev.ch != ch)]
    def logic(self, t):
        """Play one row of duration ``t``: fire events with ev.t <= 1.0
        in time order (sleeping proportionally when the context allows),
        shift later events one row closer, and keep the unprocessed tail
        if interrupted."""
        processed = 0
        self.passed = 0
        try:
            self.events = sorted(self.events, key=(lambda e: e.t))
            for ev in self.events:
                if (ev.t > 1.0):
                    # Belongs to a later row; pull it one row closer.
                    ev.t -= 1.0
                else:
                    if (ev.t >= 0.0):
                        if (self.ctx.cansleep and (self.ctx.startrow == (- 1))):
                            # Advance the playback clock and wait until
                            # the event's fractional position in the row.
                            self.ctx.t += ((self.ctx.speed * t) * (ev.t - self.passed))
                            time.sleep(max(0, ((self.ctx.speed * t) * (ev.t - self.passed))))
                        ev.func(0)
                        self.passed = ev.t
                    else:
                        ev.func(0)
                    processed += 1
            # Sleep out the remainder of the row.
            slp = (t * (1.0 - self.passed))
            if (slp > 0.0):
                self.ctx.t += (self.ctx.speed * slp)
                if (self.ctx.cansleep and (self.ctx.startrow == (- 1))):
                    time.sleep(max(0, (self.ctx.speed * slp)))
            self.passed = 0.0
            self.events = self.events[processed:]
        except KeyboardInterrupt:
            # Keep the unplayed tail so playback can resume.
            self.events = self.events[processed:]
            raise
        except SignalError:
            self.events = self.events[processed:]
            raise
        except EOFError:
            self.events = self.events[processed:]
            raise
class Solution():
    def maxNumberOfBalloons(self, text: str) -> int:
        """Return how many times the word 'balloon' can be assembled
        from the characters of *text*, each character used at most once."""
        need = Counter('balloon')
        # Count only the characters that can contribute to 'balloon'.
        have = Counter(ch for ch in text if ch in need)
        best = len(text)
        for ch, per_word in need.items():
            if ch not in have:
                return 0
            best = min(best, have[ch] // per_word)
        return best
class GenerateActionMetaData():
    """Iterator yielding (action, metadata-line) pairs for Elasticsearch
    bulk bodies, optionally simulating id conflicts (re-indexing or
    updating an existing id) with a configurable probability and a
    recency bias toward recently issued ids."""
    # Controls how sharply the exponential recency bias favors new ids.
    RECENCY_SLOPE = 30
    def __init__(self, index_name, type_name, conflicting_ids=None, conflict_probability=None, on_conflict=None, recency=None, rand=random.random, randint=random.randint, randexp=random.expovariate, use_create=False):
        # Pre-render metadata-line templates; the '_id' slot stays '%s'
        # so it can be filled per document.
        if type_name:
            self.meta_data_index_with_id = ('{"index": {"_index": "%s", "_type": "%s", "_id": "%s"}}\n' % (index_name, type_name, '%s'))
            self.meta_data_update_with_id = ('{"update": {"_index": "%s", "_type": "%s", "_id": "%s"}}\n' % (index_name, type_name, '%s'))
            self.meta_data_index_no_id = ('{"index": {"_index": "%s", "_type": "%s"}}\n' % (index_name, type_name))
        else:
            self.meta_data_index_with_id = ('{"index": {"_index": "%s", "_id": "%s"}}\n' % (index_name, '%s'))
            self.meta_data_update_with_id = ('{"update": {"_index": "%s", "_id": "%s"}}\n' % (index_name, '%s'))
            self.meta_data_index_no_id = ('{"index": {"_index": "%s"}}\n' % index_name)
            self.meta_data_create_no_id = ('{"create": {"_index": "%s"}}\n' % index_name)
        if (use_create and conflicting_ids):
            raise exceptions.RallyError("Index mode '_create' cannot be used with conflicting ids")
        self.conflicting_ids = conflicting_ids
        self.on_conflict = on_conflict
        self.use_create = use_create
        # Convert percent to a probability in [0, 1].
        self.conflict_probability = ((conflict_probability / 100.0) if (conflict_probability is not None) else 0)
        self.recency = (recency if (recency is not None) else 0)
        # Injectable randomness sources (deterministic in tests).
        self.rand = rand
        self.randint = randint
        self.randexp = randexp
        # Number of ids from conflicting_ids issued so far.
        self.id_up_to = 0
    def is_constant(self):
        """True when every iteration yields the same metadata line."""
        return (self.conflicting_ids is None)
    def __iter__(self):
        return self
    def __next__(self):
        if (self.conflicting_ids is not None):
            # Either reuse an already-issued id (conflict) or consume
            # the next unseen one.
            if (self.conflict_probability and (self.id_up_to > 0) and (self.rand() <= self.conflict_probability)):
                if (self.recency == 0):
                    # Uniform choice over all ids issued so far.
                    idx = self.randint(0, (self.id_up_to - 1))
                else:
                    # Exponentially bias toward the most recent ids.
                    idx_range = min(self.randexp((GenerateActionMetaData.RECENCY_SLOPE * self.recency)), 1)
                    idx = round(((self.id_up_to - 1) * (1 - idx_range)))
                doc_id = self.conflicting_ids[idx]
                action = self.on_conflict
            else:
                if (self.id_up_to >= len(self.conflicting_ids)):
                    raise StopIteration()
                doc_id = self.conflicting_ids[self.id_up_to]
                self.id_up_to += 1
                action = 'index'
            if (action == 'index'):
                return ('index', (self.meta_data_index_with_id % doc_id))
            elif (action == 'update'):
                return ('update', (self.meta_data_update_with_id % doc_id))
            else:
                raise exceptions.RallyAssertionError(f'Unknown action [{action}]')
        else:
            if self.use_create:
                return ('create', self.meta_data_create_no_id)
            return ('index', self.meta_data_index_no_id)
class ERC20IndexedPattern(DeclarationUtils, AbstractAstPattern):
    """Checks that ERC20-specified events (Transfer, Approval) declare
    their first two parameters with the ``indexed`` keyword."""
    name = 'ERC20 Indexed Pattern'
    description = "Events defined by ERC20 specification should use the 'indexed' keyword."
    severity = Severity.LOW
    tags = {}
    def find_matches(self) -> List[PatternMatch]:
        """Yield a compliant or violation match for every ERC20 event
        definition found in the AST."""
        ast_root = self.get_ast_root()
        events = ast_root.find_descendants_of_type(EventDefinition)
        for e in events:
            if (e.name in self.ERC20_events):
                # Both of the first two parameters must carry 'indexed'
                # in their source text.
                matches = [re.match('.* indexed .*', p.src_code) for p in e.parameters.parameters[:2]]
                if all(matches):
                    (yield self.match_compliant().with_info(MatchComment(f'{e.name} event is a compliant ERC20 event.'), *self.ast_node_info(e)))
                else:
                    (yield self.match_violation().with_info(MatchComment(f'{e.name} event is an ERC20 event and as such, it should contain indexed keyword in the first two arguments'), *self.ast_node_info(e)))
    # Event names the ERC20 standard requires to be indexed.
    ERC20_events = {'Transfer', 'Approval'}
# NOTE(review): looks like a stripped pytest marker decorator
# (e.g. @pytest.mark.requires_window_manager) -- confirm.
.requires_window_manager
def test_analyse_success(mock_storage, qtbot, ert_mock):
    """Running Analyse on a worker QThread must emit both 'finished'
    signals within the 2s timeout."""
    (target, source) = mock_storage
    analyse = Analyse(ert_mock, target, source)
    thread = QThread()
    with qtbot.waitSignals([analyse.finished, thread.finished], timeout=2000, raising=True):
        # Hand the worker to the thread and wire run/quit signals.
        analyse.moveToThread(thread)
        thread.started.connect(analyse.run)
        analyse.finished.connect(thread.quit)
        thread.start()
class RefreshSequencer(Module):
    """Migen module issuing ``postponing`` back-to-back refreshes via a
    RefreshExecuter; asserts ``done`` when the last one completes."""
    def __init__(self, cmd, trp, trfc, postponing=1):
        self.start = Signal()
        self.done = Signal()
        executer = RefreshExecuter(cmd, trp, trfc)
        self.submodules += executer
        # Down-counter of refreshes still owed after the current one.
        count = Signal(bits_for(postponing), reset=(postponing - 1))
        self.sync += [If(self.start, count.eq(count.reset)).Elif(executer.done, If((count != 0), count.eq((count - 1))))]
        # Re-trigger the executer while refreshes remain outstanding.
        self.comb += executer.start.eq((self.start | (count != 0)))
        self.comb += self.done.eq((executer.done & (count == 0)))
# NOTE(review): looks like a stripped log-line-matcher decorator whose
# name was lost in extraction -- confirm against the original.
(expression='^Finished Phase (?P<phase>\\d+) in (?P<duration>[^ ]+) seconds.')
def phase_finished(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
    """Record a finished plot phase: store its raw duration and advance
    the tracked phase to (major + 1, minor 0)."""
    major = int(match.group('phase'))
    duration = float(match.group('duration'))
    duration_dict = {f'phase{major}_duration_raw': duration}
    return attr.evolve(info, phase=plotman.job.Phase(major=(major + 1), minor=0), **duration_dict)
# NOTE(review): looks like a stripped @pytest.mark.parametrize
# decorator over the fixture table -- confirm.
.parametrize('estimator, cv, threshold, scoring, dropped_features, performances', _model_and_expectations)
def test_classification(estimator, cv, threshold, scoring, dropped_features, performances, df_test):
    """RecursiveFeatureAddition must drop the expected features, record
    one performance drift per column, and transform accordingly."""
    (X, y) = df_test
    sel = RecursiveFeatureAddition(estimator=estimator, cv=cv, threshold=threshold, scoring=scoring)
    sel.fit(X, y)
    Xtransformed = X.copy()
    Xtransformed = Xtransformed.drop(labels=dropped_features, axis=1)
    assert (sel.features_to_drop_ == dropped_features)
    # One drift entry per original column.
    assert (len(sel.performance_drifts_.keys()) == len(X.columns))
    assert all([(var in sel.performance_drifts_.keys()) for var in X.columns])
    # Compare drifts at 4-decimal precision to tolerate float noise.
    rounded_perfs = {key: round(sel.performance_drifts_[key], 4) for key in sel.performance_drifts_}
    assert (rounded_perfs == performances)
    pd.testing.assert_frame_equal(sel.transform(X), Xtransformed)
def bulk_run_args__init__(self, device_to_commands=None, timeout=bulk_run_args.thrift_spec[3][4], open_timeout=bulk_run_args.thrift_spec[4][4], client_ip=bulk_run_args.thrift_spec[10][4], client_port=bulk_run_args.thrift_spec[11][4], uuid=bulk_run_args.thrift_spec[12][4]):
    """Initializer for the Thrift-generated ``bulk_run_args`` struct.

    Defaults are pulled from ``thrift_spec[...][4]`` (the per-field
    default-value slot of the generated spec), keeping them in sync with
    the IDL.
    """
    self.device_to_commands = device_to_commands
    self.timeout = timeout
    self.open_timeout = open_timeout
    self.client_ip = client_ip
    self.client_port = client_port
    self.uuid = uuid
def _test_generate_delft_with_multiple_tokens_tei_only(tmp_path: Path, model_name: str, file_suffix: str, tei_root: etree.ElementBase, tokens: Sequence[str], expected_labels: Sequence[str], data_generator: ModelDataGenerator, layout_document: Optional[LayoutDocument]=None):
    """Shared test helper: write a TEI sample, run the delft-data
    generation CLI for *model_name*, and assert the produced texts,
    labels and features match what *data_generator* yields for the
    layout document."""
    tei_source_path = (tmp_path / 'tei')
    output_path = (tmp_path / 'output.data')
    tei_source_path.mkdir(parents=True, exist_ok=True)
    (tei_source_path / f'sample{file_suffix}.tei.xml').write_bytes(etree.tostring(tei_root))
    main([f'--model-name={model_name}', f'--tei-source-path={tei_source_path}/*.tei.xml', f'--delft-output-path={output_path}'])
    assert output_path.exists()
    if (layout_document is None):
        # Default layout: all tokens joined into a single text block.
        layout_document = LayoutDocument.for_blocks([LayoutBlock.for_text(' '.join(tokens))])
    expected_data_lines = list(data_generator.iter_data_lines_for_layout_document(layout_document))
    (_expected_texts, expected_features) = load_data_crf_lines(expected_data_lines)
    LOGGER.debug('expected_features: %r', expected_features)
    (texts, labels, features) = load_data_and_labels_crf_file(str(output_path))
    LOGGER.debug('texts: %r', texts)
    LOGGER.debug('labels: %r', labels)
    LOGGER.debug('features: %r', features)
    LOGGER.debug('training tei: %r', etree.tostring(tei_root))
    # A single sample is expected, with per-token labels and features.
    assert (len(texts) == 1)
    assert (list(texts[0]) == tokens)
    assert (list(labels[0]) == expected_labels)
    assert (features.tolist() == expected_features.tolist())
class TestGlobalScriptContainer(unittest.TestCase):
    """Tests of ``containers.GlobalScriptContainer`` startup handling for
    the GLOBAL_SCRIPTS setting: empty, typeclass-less, missing, valid,
    invalid and import-broken script typeclasses.

    NOTE(review): the bare ``_settings(GLOBAL_SCRIPTS=...)`` calls between
    methods look like decorators whose prefix was lost in extraction
    (most likely ``@override_settings(GLOBAL_SCRIPTS=...)`` applied to the
    method that follows each call) — confirm against the upstream file.
    """

    def test_init_with_no_scripts(self):
        gsc = containers.GlobalScriptContainer()
        self.assertEqual(len(gsc.loaded_data), 0)

    _settings(GLOBAL_SCRIPTS={})

    def test_start_with_no_scripts(self):
        gsc = containers.GlobalScriptContainer()
        gsc.start()
        self.assertEqual(len(gsc.typeclass_storage), 0)

    _settings(GLOBAL_SCRIPTS={'script_name': {}})

    def test_start_with_typeclassless_script(self):
        # A script entry without 'typeclass' falls back to the base typeclass.
        gsc = containers.GlobalScriptContainer()
        gsc.start()
        self.assertEqual(len(gsc.typeclass_storage), 1)
        self.assertIn('script_name', gsc.typeclass_storage)
        self.assertEqual(gsc.typeclass_storage['script_name'], _BASE_TYPECLASS)

    _settings(GLOBAL_SCRIPTS={'script_name': {'typeclass': 'evennia.utils.tests.test_containers.NoScript'}})

    def test_start_with_nonexistent_script(self):
        # A dotted path to a missing class also falls back to the base typeclass.
        gsc = containers.GlobalScriptContainer()
        gsc.start()
        self.assertEqual(len(gsc.typeclass_storage), 1)
        self.assertIn('script_name', gsc.typeclass_storage)
        self.assertEqual(gsc.typeclass_storage['script_name'], _BASE_TYPECLASS)

    _settings(GLOBAL_SCRIPTS={'script_name': {'typeclass': 'evennia.utils.tests.test_containers.GoodScript'}})

    def test_start_with_valid_script(self):
        gsc = containers.GlobalScriptContainer()
        gsc.start()
        self.assertEqual(len(gsc.typeclass_storage), 1)
        self.assertIn('script_name', gsc.typeclass_storage)
        self.assertEqual(gsc.typeclass_storage['script_name'], GoodScript)

    _settings(GLOBAL_SCRIPTS={'script_name': {'typeclass': 'evennia.utils.tests.test_containers.InvalidScript'}})

    def test_start_with_invalid_script(self):
        gsc = containers.GlobalScriptContainer()
        with self.assertRaises(AttributeError) as err:
            gsc.start()
        self.assertTrue(str(err.exception).startswith("type object 'InvalidScript' has no attribute"), err.exception)

    _settings(GLOBAL_SCRIPTS={'script_name': {'typeclass': 'evennia.utils.tests.data.broken_script.BrokenScript'}})

    def test_start_with_broken_script(self):
        gsc = containers.GlobalScriptContainer()
        with self.assertRaises(Exception) as err:
            gsc.start()
        self.assertTrue(str(err.exception).startswith("cannot import name 'nonexistent_module' from 'evennia'"), err.exception)
class Demo(lg.Graph):
    """Demo graph wiring a ``Generator`` module into a ``SimpleVizGroup``.

    NOTE(review): ``connections`` returns the identical
    (GENERATOR.OUTPUT -> VIZ.INPUT) pair twice; presumably only one
    connection is intended — confirm before deduplicating, the duplicate
    is kept here to preserve behaviour.
    """

    GENERATOR: Generator
    VIZ: SimpleVizGroup

    def setup(self) -> None:
        # Configure the generator before the graph starts streaming.
        self.GENERATOR.configure(GeneratorConfig(sample_rate=SAMPLE_RATE, num_features=NUM_FEATURES))

    def connections(self) -> lg.Connections:
        return ((self.GENERATOR.OUTPUT, self.VIZ.INPUT), (self.GENERATOR.OUTPUT, self.VIZ.INPUT))

    def process_modules(self) -> Tuple[(lg.Module, ...)]:
        return (self.GENERATOR, self.VIZ)
class TestOFPQueueStatsReply(unittest.TestCase):
    """Test of ``OFPQueueStatsReply.parser`` (OpenFlow 1.0 queue stats)."""

    class Datapath(object):
        # Minimal stand-in datapath exposing the protocol modules.
        ofproto = ofproto
        ofproto_parser = ofproto_v1_0_parser

    c = OFPQueueStatsReply(Datapath)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        pass

    def test_parser(self):
        # Each field is described as {'buf': wire bytes, 'val': expected
        # decoded value}.  The expected values of several multi-byte fields
        # were lost during extraction (literals read ``'val': }``); they are
        # reconstructed here from the wire bytes, which is sound because all
        # OpenFlow fields are big-endian unsigned integers.
        version = {'buf': b'\x01', 'val': ofproto.OFP_VERSION}
        msg_type = {'buf': b'\x11', 'val': ofproto.OFPT_STATS_REPLY}
        msg_len_val = (ofproto.OFP_STATS_MSG_SIZE + ofproto.OFP_QUEUE_STATS_SIZE)
        msg_len = {'buf': b'\x00,', 'val': msg_len_val}
        xid = {'buf': b'\x19\xfc(l'}
        xid['val'] = int.from_bytes(xid['buf'], 'big')
        buf = (((version['buf'] + msg_type['buf']) + msg_len['buf']) + xid['buf'])
        type_ = {'buf': b'\x00\x05', 'val': ofproto.OFPST_QUEUE}
        flags = {'buf': b';+', 'val': 15147}
        buf += (type_['buf'] + flags['buf'])
        port_no = {'buf': b'\xe7k', 'val': 59243}
        zfill = (b'\x00' * 2)  # 2-byte padding after port_no in ofp_queue_stats
        queue_id = {'buf': b'*\xa8\x7f2'}
        queue_id['val'] = int.from_bytes(queue_id['buf'], 'big')
        tx_bytes = {'buf': b'w\xe1\xd5c\x18\xaec\xaa'}
        tx_bytes['val'] = int.from_bytes(tx_bytes['buf'], 'big')
        tx_packets = {'buf': b"'\xa4A\xd7\xd4S\x9eB"}
        tx_packets['val'] = int.from_bytes(tx_packets['buf'], 'big')
        # TODO(review): the original tx_errors wire bytes were truncated in
        # extraction (only b'W2\x08/' survived); padded to the required
        # 8 bytes so the parse/compare round-trip stays self-consistent.
        tx_errors = {'buf': b'W2\x08/\x00\x00\x00\x00'}
        tx_errors['val'] = int.from_bytes(tx_errors['buf'], 'big')
        buf += (((((port_no['buf'] + zfill) + queue_id['buf']) + tx_bytes['buf']) + tx_packets['buf']) + tx_errors['buf'])
        res = OFPQueueStatsReply.parser(object, version['val'], msg_type['val'], msg_len['val'], xid['val'], buf)
        eq_(version['val'], res.version)
        eq_(msg_type['val'], res.msg_type)
        eq_(msg_len['val'], res.msg_len)
        eq_(xid['val'], res.xid)
        eq_(type_['val'], res.type)
        eq_(flags['val'], res.flags)
        body = res.body[0]
        eq_(port_no['val'], body.port_no)
        eq_(queue_id['val'], body.queue_id)
        eq_(tx_bytes['val'], body.tx_bytes)
        eq_(tx_packets['val'], body.tx_packets)
        eq_(tx_errors['val'], body.tx_errors)

    def test_serialize(self):
        # Reply messages are parsed, not serialized, on this side; nothing to test.
        pass
def find_referenced_fides_keys(resource: object) -> Set[FidesKey]:
    """Recursively collect every FidesKey referenced by ``resource``.

    Attributes are discovered from the signature of ``type(resource)`` and
    only considered when they exist on the instance and are truthy:
      * ``FidesKey`` / ``Optional[FidesKey]`` annotations contribute the value,
      * ``List[FidesKey]`` annotations contribute every element,
      * other list values (unless annotated ``List[str]``) are scanned via
        ``find_nested_keys_in_list``,
      * nested pydantic models are recursed into.

    Plain strings that are not Enum members carry no keys -> empty set.
    """
    referenced_fides_keys: Set[FidesKey] = set()
    if (isinstance(resource, str) and (not isinstance(resource, Enum))):
        return set()
    signature = inspect.signature(type(resource), follow_wrapped=True)
    # Only constructor parameters that actually exist as instance attributes.
    attributes = filter((lambda parameter: hasattr(resource, parameter.name)), signature.parameters.values())
    for attribute in attributes:
        attribute_value = resource.__getattribute__(attribute.name)
        if attribute_value:
            if (attribute.annotation in (FidesKey, Optional[FidesKey])):
                referenced_fides_keys.add(attribute_value)
            elif (attribute.annotation == List[FidesKey]):
                # Reuse the value fetched above (was a redundant second lookup).
                referenced_fides_keys.update(attribute_value)
            elif (isinstance(attribute_value, list) and (attribute.annotation != List[str])):
                nested_keys = find_nested_keys_in_list(attribute_value)
                referenced_fides_keys.update(nested_keys)
            elif isinstance(attribute_value, BaseModel):
                referenced_fides_keys.update(find_referenced_fides_keys(attribute_value))
    return referenced_fides_keys
class ScopeableStatsProxy(object):
    """Proxy around a statsd-style client that prepends a scope prefix to
    metric names and supports tag serialization into the metric name.

    Wrapped metric methods accept two extra keyword arguments:
      * ``tags`` — dict folded into the metric name as ``.__key=value``,
      * ``per_host`` — when truthy, adds the ``_f=i`` tag.
    Any attribute not wrapped here is delegated to the underlying client.
    """

    # Client methods that receive the prefix/tag treatment.
    EXTENDABLE_FUNC = ['incr', 'decr', 'timing', 'timer', 'gauge', 'set']

    def __init__(self, client, prefix=None):
        self._client = client
        self._scope_prefix = prefix
        for extendable_func in self.EXTENDABLE_FUNC:
            base_func = getattr(self._client, extendable_func)
            if base_func:
                setattr(self, extendable_func, self._create_wrapped_function(base_func))

    def _create_wrapped_function(self, base_func):
        """Wrap ``base_func`` with tag serialization and optional prefixing."""
        # Decide prefixing once at creation time, matching the original
        # behaviour of building a different closure per case.
        add_prefix = bool(self._scope_prefix)

        def name_wrap(stat, *args, **kwargs):
            tags = kwargs.pop('tags', {})
            if kwargs.pop('per_host', False):
                tags['_f'] = 'i'
            if tags:
                stat = self._serialize_tags(stat, tags)
            if add_prefix:
                stat = self._p_with_prefix(stat)
            return base_func(stat, *args, **kwargs)
        return name_wrap

    def get_stats(self, name):
        """Return a child proxy scoped to ``<current prefix>.<name>``."""
        if ((not self._scope_prefix) or (self._scope_prefix == '')):
            prefix = name
        else:
            prefix = ((self._scope_prefix + '.') + name)
        return ScopeableStatsProxy(self._client, prefix)

    def pipeline(self):
        """Return a proxy over the client's pipeline with the same scope."""
        return ScopeableStatsProxy(self._client.pipeline(), self._scope_prefix)

    def _p_with_prefix(self, name):
        # None passes through untouched so callers can use sentinel names.
        if (name is None):
            return name
        return ((self._scope_prefix + '.') + name)

    def _is_ascii(self, name):
        # str.isascii() exists from Python 3.7; older versions fall back to
        # an encode probe.
        if (sys.version_info >= (3, 7)):
            return name.isascii()
        try:
            return (name and name.encode('ascii'))
        except UnicodeEncodeError:
            return False

    def _serialize_tags(self, metric, tags=None):
        """Append ``.__key=value`` pairs (sorted by key) to ``metric``.

        Returns the untouched metric if any key is non-ASCII; forbidden
        characters in tag values are replaced with '_' and empty values
        are skipped.
        """
        stat = metric
        if tags:
            for key in sorted(tags):
                try:
                    key = str(key)
                    if (not self._is_ascii(key)):
                        return stat
                    tag = FORBIDDEN_TAG_VALUE_CHARACTERS.sub('_', str(tags[key]))
                    if (tag != ''):
                        metric += '.__{0}={1}'.format(key, tag)
                except UnicodeEncodeError:
                    return stat
        return metric

    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a recognized special method;
        # Python never calls it, so this is dead code.  Kept only in case
        # something invokes it explicitly.
        return hasattr(self._client, name)

    def __getattr__(self, name):
        # Delegate anything not wrapped in __init__ to the underlying client.
        return getattr(self._client, name)

    def __enter__(self):
        return ScopeableStatsProxy(self._client.__enter__(), self._scope_prefix)

    def __exit__(self, exc_type, exc_value, traceback):
        self._client.__exit__(exc_type, exc_value, traceback)
# NOTE(review): this bare call looks like a class decorator whose prefix was
# lost in extraction (e.g. ``@use_defaults()`` applied to PageSchema); as
# written it is a module-level call — confirm against the upstream file.
_defaults()


class PageSchema(Schema):
    """JSON:API schema for site pages (name, URL, placement, etc.)."""

    class Meta():
        # JSON:API resource metadata.
        type_ = 'page'
        self_view = 'v1.page_detail'
        self_view_kwargs = {'id': '<id>'}
        inflect = dasherize

    id = fields.Str(dump_only=True)  # server-assigned, read-only
    name = fields.Str(required=True)
    title = fields.Str(allow_none=True)
    url = fields.String(required=True)
    description = fields.Str(allow_none=True)
    # Where the page link is displayed: the site footer or event pages.
    place = fields.Str(validate=validate.OneOf(choices=['footer', 'event']), allow_none=True)
    language = fields.Str(allow_none=True)
    index = fields.Integer(default=0)
# NOTE(review): the decorator prefix was lost in extraction (the line began
# with a bare ``.parametrize``, a SyntaxError); restored as the standard
# pytest parametrization — the file must import ``pytest``.
@pytest.mark.parametrize('amount, min_deposit_size, success', [((2000 * (10 ** 18)), (2000 * (10 ** 18)), True), ((1000 * (10 ** 18)), (100 * (10 ** 18)), True), ((1500 * (10 ** 18)), (1499 * (10 ** 18)), True), (1, 1, True), ((999 * (10 ** 18)), (1000 * (10 ** 18)), False), ((10 * (10 ** 18)), (1500 * (10 ** 18)), False), (0, 1, False)])
def test_deposit(casper, concise_casper, funded_account, validation_key, amount, success, deposit_validator, new_epoch, assert_tx_failed):
    """Deposits at or above the minimum register a validator; smaller
    deposits must revert.

    ``min_deposit_size`` is consumed by a fixture (it does not appear in
    this signature); ``success`` states whether the deposit should go
    through.
    """
    start_epoch = concise_casper.START_EPOCH()
    new_epoch()
    assert (concise_casper.current_epoch() == (start_epoch + 1))
    assert (concise_casper.next_validator_index() == 1)
    if (not success):
        # Under-sized deposits are expected to fail the transaction.
        assert_tx_failed((lambda : deposit_validator(funded_account, validation_key, amount)))
        return
    deposit_validator(funded_account, validation_key, amount)
    assert (concise_casper.next_validator_index() == 2)
    assert (concise_casper.validator_indexes(funded_account) == 1)
    assert (concise_casper.deposit_size(1) == amount)
    # Two epochs move the deposit from the next dynasty into the current one.
    for i in range(2):
        new_epoch()
    assert (concise_casper.dynasty() == 2)
    assert (concise_casper.total_curdyn_deposits_in_wei() == amount)
    assert (concise_casper.total_prevdyn_deposits_in_wei() == 0)
def main():
    """CLI entry point for rendering a 3D gamut diagram.

    Parses command-line options, builds the figure via
    ``plot_gamut_in_space`` and either writes it to ``--output`` (HTML,
    JSON, or an image format chosen by file extension) or shows it
    interactively.

    Returns:
        0 if a figure was produced, 1 otherwise (suitable as an exit code).
    """
    parser = argparse.ArgumentParser(prog='3d_diagrams', description='Plot 3D gamut in a different color spaces.')
    parser.add_argument('--space', '-s', help='Desired space.')
    parser.add_argument('--gamut', '-g', default='srgb', help='Gamut space to render space in. Gamut space must be bounded and must have channels in the range [0, 1].As only a shell is rendered for the gamut, the target space should be less than or equal to the size of the target gamut or there will be areas that do not render. Cylindrical spaces based specifically off an RGB gamut, such as HSL being based on sRGB, will only be done under the related gamut and will ignore this option.')
    parser.add_argument('--opacity', default=1.0, type=float, help='opacity')
    parser.add_argument('--resolution', '-r', default='200', help='How densely to render the figure. Some spaces need higher resolution to flesh out certain areas, but it comes at the cost of speed. Minimum is 60, default is 200.')
    parser.add_argument('--pos', '-p', default=None, help="Position of camara 'x:y:z'")
    parser.add_argument('--edges', '-e', action='store_true', help='Plot edges.')
    parser.add_argument('--title', '-t', default='', help='Provide a title for the diagram.')
    parser.add_argument('--dark', action='store_true', help='Use dark theme.')
    parser.add_argument('--output', '-o', default='', help='Output file.')
    parser.add_argument('--height', '-H', type=int, default=800, help='Height')
    parser.add_argument('--width', '-W', type=int, default=800, help='Width')
    parser.add_argument('--azimuth', '-A', type=float, default=45, help='Camera X position')
    parser.add_argument('--elevation', '-E', type=float, default=45, help='Camera Y position')
    parser.add_argument('--distance', '-D', type=float, default=2.5, help='Camera Z position')
    parser.add_argument('--aspect-ratio', '-R', default='1:1:1', help='Aspect ratio. Set to 0:0:0 to leave aspect ratio untouched.')
    parser.add_argument('--projection', '-P', default='perspective', help='Projection mode, perspective or orthographic')
    args = parser.parse_args()
    # "x:y:z" string -> {'x': ..., 'y': ..., 'z': ...} mapping.
    aspect = {k: float(v) for (k, v) in zip(['x', 'y', 'z'], args.aspect_ratio.split(':'))}
    # NOTE(review): the --resolution help advertises a minimum of 60, but the
    # clamp below only enforces 8 — confirm which is intended.
    fig = plot_gamut_in_space(args.space, args.gamut, title=args.title, dark=args.dark, resolution=max(8, int(args.resolution)), opacity=args.opacity, edges=args.edges, size=(args.width, args.height), camera={'a': args.azimuth, 'e': args.elevation, 'r': args.distance}, aspect=aspect, projection=args.projection)
    if fig:
        if args.output:
            # Pick the writer from the output file extension.
            filetype = os.path.splitext(args.output)[1].lstrip('.').lower()
            if (filetype == 'html'):
                with open(args.output, 'w') as f:
                    f.write(io.to_html(fig))
            elif (filetype == 'json'):
                io.write_json(fig, args.output)
            else:
                with open(args.output, 'wb') as f:
                    f.write(fig.to_image(format=filetype))
        else:
            fig.show()
        return 0
    return 1
class OptionSeriesTimelineDragdropGuideboxDefault(Options):
    """Highcharts ``dragDrop.guideBox.default`` options.

    Each option is exposed as a property: the getter returns the configured
    value (with the Highcharts default passed to ``_config_get``) and the
    setter stores the new value via ``_config``.

    NOTE(review): the extracted source had each getter/setter pair as two
    plain methods with the same name, so each setter silently shadowed its
    getter; the ``@property`` / ``@<name>.setter`` decorators are restored
    here — confirm against the generated upstream file.
    """

    @property
    def className(self):
        # CSS class name applied to the guide box.
        return self._config_get('highcharts-drag-box-default')

    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)

    @property
    def color(self):
        # Fill color of the guide box.
        return self._config_get('rgba(0, 0, 0, 0.1)')

    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)

    @property
    def cursor(self):
        # Mouse cursor shown while dragging.
        return self._config_get('move')

    @cursor.setter
    def cursor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineColor(self):
        # Border color of the guide box.
        return self._config_get('#888')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        # Border width of the guide box.
        return self._config_get(1)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def zIndex(self):
        # Stacking z-index of the guide box.
        return self._config_get(900)

    @zIndex.setter
    def zIndex(self, num: float):
        self._config(num, js_type=False)
# NOTE(review): the ``@pytest.mark`` decorator prefixes were lost in
# extraction (the lines began with bare ``.usefixtures``/``.parametrize``,
# a SyntaxError); restored here — the file must import ``pytest``.
@pytest.mark.usefixtures('use_tmpdir')
@pytest.mark.parametrize('queue_system, queue_system_option', [('LSF', 'LSF_SERVER'), ('SLURM', 'SQUEUE'), ('TORQUE', 'QUEUE')])
def test_initializing_empty_config_queue_options_resets_to_default_value(queue_system, queue_system_option):
    """A QUEUE_OPTION given with no value must reset that option to its
    default ('' for the system option, '0' for MAX_RUNNING)."""
    filename = 'config.ert'
    with open(filename, 'w', encoding='utf-8') as f:
        f.write('NUM_REALIZATIONS 1\n')
        f.write(f'''QUEUE_SYSTEM {queue_system}
''')
        f.write(f'''QUEUE_OPTION {queue_system} {queue_system_option}
''')
        f.write(f'''QUEUE_OPTION {queue_system} MAX_RUNNING
''')
    config_object = ErtConfig.from_file(filename)
    driver = Driver.create_driver(config_object.queue_config)
    # Value-less options fall back to their defaults.
    assert (driver.get_option(queue_system_option) == '')
    assert (driver.get_option('MAX_RUNNING') == '0')
    for options in config_object.queue_config.queue_options[queue_system]:
        assert isinstance(options, tuple)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.